Zephyr API Documentation 2.7.0-rc2
spinlock.h
/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <sys/atomic.h>
#include <sys/__assert.h>
#include <stdbool.h>
#include <arch/cpu.h>

#ifdef __cplusplus
extern "C" {
#endif

struct z_spinlock_key {
	int key;
};

/** Kernel Spin Lock. */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#endif

#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined,
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas on the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this we add a 1 byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
};
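/* Illustrative sketch (not part of the original header): the layout skew
 * the comment above describes. Compiled as C, an empty k_spinlock takes
 * zero bytes and `count` lands at offset 0; compiled as C++, the empty
 * struct has size 1, so `count` lands at offset 4 after alignment. The
 * struct name `example_q` is made up for this sketch.
 *
 *	struct example_q {
 *		struct k_spinlock lock;
 *		uint32_t count;
 *	};
 *
 * The one-byte `dummy` member keeps both languages agreeing on the layout.
 */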

/* There's a spinlock validation framework available when asserts are
 * enabled. It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size; don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */
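/* Illustrative sketch (not part of the original header): what the
 * validator catches. With CONFIG_SPIN_VALIDATE enabled, taking a lock
 * this CPU already holds fails z_spin_lock_valid() and fires the
 * "Recursive spinlock" assertion in k_spin_lock(), rather than spinning
 * forever as it would on SMP:
 *
 *	k_spinlock_key_t key = k_spin_lock(&lock);
 *	k_spin_lock(&lock);	// asserts here when validation is enabled
 */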

/** Spinlock key type. */
typedef struct z_spinlock_key k_spinlock_key_t;

/** Lock a spinlock. */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation. The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock %p", l);
# ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
# endif
#endif

#ifdef CONFIG_SMP
	while (!atomic_cas(&l->locked, 0, 1)) {
	}
#endif

#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#endif
	return k;
}

/** Unlock a spin lock. */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value). We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race. But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	arch_irq_unlock(key.key);
}
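/* Illustrative usage sketch (not part of the original header): the
 * canonical pattern keeps the lock and the key it returns together,
 * holds the lock only across the critical section, and passes the key
 * back on unlock. The names `my_lock`, `counter`, and `increment` are
 * made up for this example.
 *
 *	static struct k_spinlock my_lock;
 *	static unsigned int counter;
 *
 *	void increment(void)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *		counter++;	// critical section: IRQs masked, lock held
 *		k_spin_unlock(&my_lock, key);
 *	}
 */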

/* Internal function: releases the lock, but leaves local interrupts
 * disabled.
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}
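/* Illustrative sketch (not part of the original header): k_spin_release()
 * suits hand-off paths where the interrupt state saved in the key is
 * restored by a later step (a context switch, for instance) rather than
 * by k_spin_unlock(). The function name `hand_off` is made up here; the
 * caller would restore IRQs later, e.g. via arch_irq_unlock(key.key).
 *
 *	k_spinlock_key_t hand_off(struct k_spinlock *l)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(l);
 *
 *		// ... state updates that must happen under the lock ...
 *
 *		k_spin_release(l);	// drop the lock, keep IRQs masked
 *		return key;		// caller restores IRQs later
 *	}
 */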

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */