searchusermenu
  • 发布文章
  • 消息中心
点赞
收藏
评论
分享
原创

spinlock和spin_lock_bh对中断下半部(软中断)的作用

2024-09-06 10:11:57
9
0

在内核程序开发中,我们经常会使用spin_lock对数据修改进行保护,常规模式如下:

int g_data = 0;

functiona()

{

spin_lock(&g_lock);

//modify global shared data

g_data = 1;

spin_unlock(&g_lock);

}

看上去是没问题了,但在某些情况下,出现了deadlock, 并且原因就是在同一个cpu核上执行functiona的spin_lock 2次导致死锁.

原因是因为functiona会在线程中被执行,也会在软中断被执行. 当functiona先在线程中执行,并且处于spin_lock()和spin_unlock()之间.

此刻,同cpu核上软中断被执行,抢占了当前线程,并且该软中断也调用函数functiona, 然后执行spin_lock()函数,便出现死锁了.

修改方法是把spin_lock()/spin_unlock()改为spin_lock_bh()/spin_unlock_bh(),因为spin_lock_bh()会关闭当前cpu核中断后半部(也就是软中断softirq).

还有spin_lock_irqsave(),它会保存中断状态并关闭当前cpu核的中断;硬中断被关闭后,软中断自然也不会再抢占当前执行流.

 

spinlock 是怎么实现的?

看一下源代码:

/*
 * spinlock_t: the generic kernel spinlock wrapper. With debugging off it
 * is just a raw_spinlock; with CONFIG_DEBUG_LOCK_ALLOC a lockdep_map is
 * overlaid through the anonymous union/struct.
 */
typedef struct spinlock {
	union {
		struct raw_spinlock rlock;
 
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Pad up to raw_spinlock's dep_map so both dep_maps share one offset. */
# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
		struct {
			u8 __padding[LOCK_PADSIZE];
			struct lockdep_map dep_map;
		};
#endif
	};
} spinlock_t;

/*
 * raw_spinlock: arch-independent container for the real lock word.
 * Everything after raw_lock is optional debug state selected by config.
 */
typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;	/* the architecture-specific lock word */
#ifdef CONFIG_GENERIC_LOCKBREAK
	unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;	/* debug: sanity magic + owning CPU */
	void *owner;			/* debug: owning task */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* lockdep tracking state */
#endif
} raw_spinlock_t;

/*
 * ARM ticket-lock layout: 'owner' is the ticket currently being served,
 * 'next' is the next ticket to hand out; both are u16 halves of the one
 * u32 word 'slock'. Field order depends on endianness (__ARMEB__ =
 * big-endian ARM).
 */
typedef struct {
	union {
		u32 slock;	/* both halves accessed as one word */
		struct __raw_tickets {
#ifdef __ARMEB__
			u16 next;
			u16 owner;
#else
			u16 owner;
			u16 next;
#endif
		} tickets;
	};
} arch_spinlock_t;

如果忽略 CONFIG_DEBUG_LOCK_ALLOC 的话,spinlock 主要包含一个arch_spinlock_t的结构,从名字可以看出,这个结构是跟体系结构有关的。

lock操作, 以spin_lock为例:

/* API entry point: spin_lock() simply forwards to the raw-spinlock layer. */
static inline void spin_lock(spinlock_t *lock)
{
    raw_spin_lock(&lock->rlock);
}

define raw_spin_lock(lock)    _raw_spin_lock(lock)

/* Out-of-line kernel entry point that forwards to the inline implementation. */
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
    __raw_spin_lock(lock);
}

/* Core lock path: disable preemption first, then spin on the arch lock. */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
preempt_disable(); // disable kernel preemption on this CPU
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); // lockdep debugging hook
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); // acquire, spinning on contention
}
 
/* Hand off to the architecture spin loop; __acquires/__acquire are sparse
 * static-analysis annotations, not runtime code. */
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
    __acquire(lock);
    arch_spin_lock(&lock->raw_lock);
}
上面 LOCK_CONTENDED 中的 trylock 路径最终对应体系结构相关的 arch_spin_trylock;以 x86 为例,arch_spin_trylock 最终调用__ticket_spin_trylock函数。其源代码如下:
// Defined in arch/x86/include/asm/spinlock_types.h
// x86 ticket lock: head = ticket being served, tail = next ticket to issue;
// head_tail aliases both as a single word for atomic updates.
typedef struct arch_spinlock {
    union {
        __ticketpair_t head_tail;
        struct __raw_tickets {
            __ticket_t head, tail; // note: x86 is little-endian, so tail lives in the higher-addressed half
        } tickets;
    };
} arch_spinlock_t;

// Defined under arch/x86/include/asm
// Try to take the ticket lock without spinning: succeeds only when the
// lock is currently free. Returns nonzero on success, 0 on failure.
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
    arch_spinlock_t old, new;
    // snapshot the current ticket values
    old.tickets = ACCESS_ONCE(lock->tickets);
    // head != tail means the lock is held: fail without spinning
    if (old.tickets.head != old.tickets.tail)
        return 0;

    new.head_tail = old.head_tail + (1 << TICKET_SHIFT); // bump tail by one

    /* cmpxchg is a full barrier, so nothing can move before it */
    return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

从上述代码中可知,__ticket_spin_trylock的核心功能,就是判断自旋锁是否被占用,如果没被占用,尝试原子性地更新lock中的head_tail的值,将 tail+1,返回是否加锁成功。

 

而对于spin_lock_bh()和spin_lock_irqsave(),则分别在加锁前多了关闭软中断和关闭中断的操作.

/* Like __raw_spin_lock, but first saves the interrupt state and disables
 * local interrupts; the saved flags are returned so the caller can restore
 * them on unlock. */
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
unsigned long flags;
 
local_irq_save(flags); // save flags and disable interrupts on this CPU core
preempt_disable();
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
/*
* On lockdep we dont want the hand-coded irq-enable of
* do_raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
#ifdef CONFIG_LOCKDEP
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
do_raw_spin_lock_flags(lock, &flags);
#endif
return flags;
}
 
 
/* Like spin_lock, but disables bottom halves (softirqs) on this CPU first,
 * which prevents the softirq-reentry deadlock. */
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); // disable softirqs on this CPU core
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
0条评论
作者已关闭评论
gongw
6文章数
0粉丝数
gongw
6 文章 | 0 粉丝
原创

spinlock和spin_lock_bh对中断下半部(软中断)的作用

2024-09-06 10:11:57
9
0

在内核程序开发中,我们经常会使用spin_lock对数据修改进行保护,常规模式如下:

int g_data = 0;

functiona()

{

spin_lock(&g_lock);

//modify global shared data

g_data = 1;

spin_unlock(&g_lock);

}

看上去是没问题了,但在某些情况下,出现了deadlock, 并且原因就是在同一个cpu核上执行functiona的spin_lock 2次导致死锁.

原因是因为functiona会在线程中被执行,也会在软中断被执行. 当functiona先在线程中执行,并且处于spin_lock()和spin_unlock()之间.

此刻,同cpu核上软中断被执行,抢占了当前线程,并且该软中断也调用函数functiona, 然后执行spin_lock()函数,便出现死锁了.

修改方法是把spin_lock()/spin_unlock()改为spin_lock_bh()/spin_unlock_bh(),因为spin_lock_bh()会关闭当前cpu核中断后半部(也就是软中断softirq).

还有spin_lock_irqsave(),它会保存中断状态并关闭当前cpu核的中断;硬中断被关闭后,软中断自然也不会再抢占当前执行流.

 

spinlock 是怎么实现的?

看一下源代码:

/*
 * spinlock_t: the generic kernel spinlock wrapper. With debugging off it
 * is just a raw_spinlock; with CONFIG_DEBUG_LOCK_ALLOC a lockdep_map is
 * overlaid through the anonymous union/struct.
 */
typedef struct spinlock {
	union {
		struct raw_spinlock rlock;
 
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Pad up to raw_spinlock's dep_map so both dep_maps share one offset. */
# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
		struct {
			u8 __padding[LOCK_PADSIZE];
			struct lockdep_map dep_map;
		};
#endif
	};
} spinlock_t;

/*
 * raw_spinlock: arch-independent container for the real lock word.
 * Everything after raw_lock is optional debug state selected by config.
 */
typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;	/* the architecture-specific lock word */
#ifdef CONFIG_GENERIC_LOCKBREAK
	unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;	/* debug: sanity magic + owning CPU */
	void *owner;			/* debug: owning task */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* lockdep tracking state */
#endif
} raw_spinlock_t;

/*
 * ARM ticket-lock layout: 'owner' is the ticket currently being served,
 * 'next' is the next ticket to hand out; both are u16 halves of the one
 * u32 word 'slock'. Field order depends on endianness (__ARMEB__ =
 * big-endian ARM).
 */
typedef struct {
	union {
		u32 slock;	/* both halves accessed as one word */
		struct __raw_tickets {
#ifdef __ARMEB__
			u16 next;
			u16 owner;
#else
			u16 owner;
			u16 next;
#endif
		} tickets;
	};
} arch_spinlock_t;

如果忽略 CONFIG_DEBUG_LOCK_ALLOC 的话,spinlock 主要包含一个arch_spinlock_t的结构,从名字可以看出,这个结构是跟体系结构有关的。

lock操作, 以spin_lock为例:

/* API entry point: spin_lock() simply forwards to the raw-spinlock layer. */
static inline void spin_lock(spinlock_t *lock)
{
    raw_spin_lock(&lock->rlock);
}

define raw_spin_lock(lock)    _raw_spin_lock(lock)

/* Out-of-line kernel entry point that forwards to the inline implementation. */
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
    __raw_spin_lock(lock);
}

/* Core lock path: disable preemption first, then spin on the arch lock. */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
preempt_disable(); // disable kernel preemption on this CPU
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); // lockdep debugging hook
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); // acquire, spinning on contention
}
 
/* Hand off to the architecture spin loop; __acquires/__acquire are sparse
 * static-analysis annotations, not runtime code. */
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
    __acquire(lock);
    arch_spin_lock(&lock->raw_lock);
}
上面 LOCK_CONTENDED 中的 trylock 路径最终对应体系结构相关的 arch_spin_trylock;以 x86 为例,arch_spin_trylock 最终调用__ticket_spin_trylock函数。其源代码如下:
// Defined in arch/x86/include/asm/spinlock_types.h
// x86 ticket lock: head = ticket being served, tail = next ticket to issue;
// head_tail aliases both as a single word for atomic updates.
typedef struct arch_spinlock {
    union {
        __ticketpair_t head_tail;
        struct __raw_tickets {
            __ticket_t head, tail; // note: x86 is little-endian, so tail lives in the higher-addressed half
        } tickets;
    };
} arch_spinlock_t;

// Defined under arch/x86/include/asm
// Try to take the ticket lock without spinning: succeeds only when the
// lock is currently free. Returns nonzero on success, 0 on failure.
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
    arch_spinlock_t old, new;
    // snapshot the current ticket values
    old.tickets = ACCESS_ONCE(lock->tickets);
    // head != tail means the lock is held: fail without spinning
    if (old.tickets.head != old.tickets.tail)
        return 0;

    new.head_tail = old.head_tail + (1 << TICKET_SHIFT); // bump tail by one

    /* cmpxchg is a full barrier, so nothing can move before it */
    return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

从上述代码中可知,__ticket_spin_trylock的核心功能,就是判断自旋锁是否被占用,如果没被占用,尝试原子性地更新lock中的head_tail的值,将 tail+1,返回是否加锁成功。

 

而对于spin_lock_bh()和spin_lock_irqsave(),则分别在加锁前多了关闭软中断和关闭中断的操作.

/* Like __raw_spin_lock, but first saves the interrupt state and disables
 * local interrupts; the saved flags are returned so the caller can restore
 * them on unlock. */
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
unsigned long flags;
 
local_irq_save(flags); // save flags and disable interrupts on this CPU core
preempt_disable();
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
/*
* On lockdep we dont want the hand-coded irq-enable of
* do_raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
#ifdef CONFIG_LOCKDEP
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
do_raw_spin_lock_flags(lock, &flags);
#endif
return flags;
}
 
 
/* Like spin_lock, but disables bottom halves (softirqs) on this CPU first,
 * which prevents the softirq-reentry deadlock. */
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); // disable softirqs on this CPU core
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
文章来自个人专栏
社区专栏
6 文章 | 1 订阅
0条评论
作者已关闭评论
作者已关闭评论
0
0