root/spinlock.c
DEFINITIONS
This source file includes the following definitions.
- initlock
- acquire
- release
- getcallerpcs
- holding
- pushcli
- popcli
// Mutual exclusion spin locks.

#include "types.h"
#include "defs.h"
#include "param.h"
#include "x86.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"
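
// Initialize lock lk; name identifies the lock when debugging.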
void
initlock(struct spinlock *lk, char *name)
{
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
}

// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lk)
{
  pushcli(); // disable interrupts to avoid deadlock.
  if(holding(lk))
    panic("acquire");

  // The xchg is atomic.
  // It also serializes, so that reads after acquire are not
  // reordered before it.
  while(xchg(&lk->locked, 1) != 0)
    ;

  // Record info about lock acquisition for debugging.
  lk->cpu = cpu;
  getcallerpcs(&lk, lk->pcs);
}
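
The spin loop relies on xchg (an inline-assembly wrapper in x86.h around the x86 xchgl instruction) being an atomic exchange with acquire semantics. As a rough, non-xv6 sketch of the same technique, the loop can be written with the GCC builtin __sync_lock_test_and_set, which performs an atomic exchange and acts as an acquire barrier; the type and function names below are hypothetical.

// Illustrative sketch only (hypothetical names, not part of xv6):
// the same test-and-set spin loop using a GCC atomic builtin.
typedef struct { volatile unsigned int locked; } toy_spinlock;

static void
toy_acquire(toy_spinlock *lk)
{
  // Atomically store 1 and return the old value; acquire barrier,
  // playing the role of the serializing xchg above.
  while(__sync_lock_test_and_set(&lk->locked, 1) != 0)
    ;  // spin until the previous value was 0 (unlocked)
}

static void
toy_release(toy_spinlock *lk)
{
  // Release barrier plus a store of 0, the counterpart of
  // xchg(&lk->locked, 0) in release() below.
  __sync_lock_release(&lk->locked);
}

Unlike acquire() above, this sketch does not disable interrupts or track the holding CPU, so it models only the atomic-exchange spin, not a drop-in replacement.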

// Release the lock.
void
release(struct spinlock *lk)
{
  if(!holding(lk))
    panic("release");

  lk->pcs[0] = 0;
  lk->cpu = 0;

  // The xchg serializes, so that reads before release are
  // not reordered after it. The 1996 PentiumPro manual (Volume 3,
  // 7.2) says reads can be carried out speculatively and in
  // any order, which implies we need to serialize here.
  // But the 2007 Intel 64 Architecture Memory Ordering White
  // Paper says that Intel 64 and IA-32 will not move a load
  // after a store. So lk->locked = 0 would work here.
  // The xchg being asm volatile ensures gcc emits it after
  // the above assignments (and after the critical section).
  xchg(&lk->locked, 0);

  popcli();
}
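
In callers, acquire and release always bracket access to the data a lock protects, and the critical section is kept short because other CPUs spin and local interrupts stay off the whole time. A minimal usage sketch, assuming a hypothetical counter (none of these names appear in xv6):

// Hedged usage sketch (hypothetical names): every access to
// stat_count happens while holding stat_lock.
struct spinlock stat_lock;
int stat_count;

void
stat_init(void)
{
  initlock(&stat_lock, "stat");
}

void
stat_inc(void)
{
  acquire(&stat_lock);   // interrupts off on this CPU, lock held
  stat_count++;          // critical section: keep it short
  release(&stat_lock);   // lock dropped; interrupts restored if they were on
}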

// Record the current call stack in pcs[] by following the %ebp chain.
void
getcallerpcs(void *v, uint pcs[])
{
  uint *ebp;
  int i;

  ebp = (uint*)v - 2;
  for(i = 0; i < 10; i++){
    if(ebp == 0 || ebp < (uint*)KERNBASE || ebp == (uint*)0xffffffff)
      break;
    pcs[i] = ebp[1];     // saved %eip
    ebp = (uint*)ebp[0]; // saved %ebp
  }
  for(; i < 10; i++)
    pcs[i] = 0;
}
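
The expression (uint*)v - 2 assumes the conventional 32-bit x86 frame layout with the frame pointer preserved (the walk breaks if the kernel is built with -fomit-frame-pointer): the first argument sits two 4-byte words above the saved %ebp, so stepping back two words from &lk in acquire yields acquire's own frame pointer. The loop stops when the saved %ebp is null, below KERNBASE, or 0xffffffff, i.e. no longer a plausible kernel frame. A sketch of the assumed layout:

// Frame layout assumed by the %ebp walk (32-bit x86, frame pointer kept):
//
//   ebp+8 : first argument           <- v points here (e.g. &lk in acquire)
//   ebp+4 : return %eip              <- recorded as pcs[i] = ebp[1]
//   ebp+0 : caller's saved %ebp      <- followed via ebp = (uint*)ebp[0]
//
// Hence (uint*)v - 2 is the %ebp of the frame that received v.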

// Check whether this cpu is holding the lock.
int
holding(struct spinlock *lock)
{
  return lock->locked && lock->cpu == cpu;
}

// Pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli. Also, if interrupts
// are off, then pushcli, popcli leaves them off.

void
pushcli(void)
{
  int eflags;

  eflags = readeflags();
  cli();
  if(cpu->ncli++ == 0)
    cpu->intena = eflags & FL_IF;
}

void
popcli(void)
{
  if(readeflags()&FL_IF)
    panic("popcli - interruptible");
  if(--cpu->ncli < 0)
    panic("popcli");
  if(cpu->ncli == 0 && cpu->intena)
    sti();
}
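
This matched counting is what makes nested locks safe: every acquire calls pushcli and every release calls popcli, so interrupts stay off until the outermost lock is released, and even then sti() runs only if interrupts were enabled at the first pushcli. A sketch with hypothetical locks a and b:

// Illustrative only (hypothetical locks a and b, not part of xv6).
void
nested_example(struct spinlock *a, struct spinlock *b)
{
  acquire(a);    // pushcli: ncli 0 -> 1, remembers whether FL_IF was set
  acquire(b);    // pushcli: ncli 1 -> 2
  // ... both locks held, interrupts off on this CPU ...
  release(b);    // popcli: ncli 2 -> 1, interrupts stay off
  release(a);    // popcli: ncli 1 -> 0, sti() only if FL_IF was set before
}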
117