-rw-r--r--  device/if_hdr.h                | 20
-rw-r--r--  device/net_io.c                |  5
-rw-r--r--  i386/i386/db_disasm.c          |  4
-rw-r--r--  i386/i386/fpu.c                | 12
-rw-r--r--  i386/i386/fpu.h                |  8
-rw-r--r--  i386/i386/pio.h                |  6
-rw-r--r--  i386/i386/proc_reg.h           |  8
-rw-r--r--  i386/i386at/rtc.h              |  8
-rw-r--r--  i386/intel/pmap.c              | 64
-rw-r--r--  i386/intel/pmap.h              | 50
-rw-r--r--  include/device/bpf.h           | 15
-rw-r--r--  include/mach/profil.h          | 16
-rw-r--r--  include/mach/time_value.h      | 67
-rw-r--r--  kern/lock.h                    | 15
-rw-r--r--  kern/processor.h               | 10
-rw-r--r--  kern/queue.h                   | 36
-rw-r--r--  kern/sched.h                   | 10
-rw-r--r--  kern/sched_prim.c              | 10
-rw-r--r--  kern/thread.h                  | 10
-rw-r--r--  kern/timer.c                   |  7
-rw-r--r--  linux/dev/glue/block.c         |  4
-rw-r--r--  linux/dev/include/linux/blk.h  |  4
-rw-r--r--  vm/pmap.h                      | 10
-rw-r--r--  vm/vm_fault.c                  | 20
-rw-r--r--  vm/vm_map.c                    | 17
25 files changed, 256 insertions, 180 deletions
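Every hunk below follows one pattern: statement-like macros whose bodies were bare brace blocks { ... } (or ad-hoc do { ... } while (0) wrappers) are rewritten with the MACRO_BEGIN/MACRO_END pair from kern/macros.h. A bare-brace body composes badly: expanded before an else, the "};" produced by the call site's semicolon ends the if-statement and orphans the else. A minimal sketch of the hazard and the conventional fix, using hypothetical macro names and assuming the classic do { ... } while (0) expansion (the exact expansion is whatever kern/macros.h defines):

    struct q { int len, drops; };

    /* Bare-brace body: "BAD_CLEAR(p);" expands to "{ ... };", so the
     * stray semicolon terminates the if and detaches the else below. */
    #define BAD_CLEAR(p)	{ (p)->len = 0; (p)->drops = 0; }

    /* do/while(0) body: the expansion is a single statement that still
     * consumes the trailing semicolon, so it composes like a call. */
    #define GOOD_CLEAR(p)	do { (p)->len = 0; (p)->drops = 0; } while (0)

    void reset(struct q *p, int full)
    {
    	if (full)
    		GOOD_CLEAR(p);	/* with BAD_CLEAR this if/else fails to compile */
    	else
    		p->drops++;
    }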
diff --git a/device/if_hdr.h b/device/if_hdr.h
index e53983b0..7127ad9c 100644
--- a/device/if_hdr.h
+++ b/device/if_hdr.h
@@ -130,35 +130,39 @@ struct ifnet {
#define IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen)
#define IF_DROP(ifq) ((ifq)->ifq_drops++)
-#define IF_ENQUEUE(ifq, ior) { \
+#define IF_ENQUEUE(ifq, ior) \
+MACRO_BEGIN \
simple_lock(&(ifq)->ifq_lock); \
enqueue_tail(&(ifq)->ifq_head, (queue_entry_t)ior); \
(ifq)->ifq_len++; \
simple_unlock(&(ifq)->ifq_lock); \
-}
-#define IF_PREPEND(ifq, ior) { \
+MACRO_END
+#define IF_PREPEND(ifq, ior) \
+MACRO_BEGIN \
simple_lock(&(ifq)->ifq_lock); \
enqueue_head(&(ifq)->ifq_head, (queue_entry_t)ior); \
(ifq)->ifq_len++; \
simple_unlock(&(ifq)->ifq_lock); \
-}
+MACRO_END
-#define IF_DEQUEUE(ifq, ior) { \
+#define IF_DEQUEUE(ifq, ior) \
+MACRO_BEGIN \
simple_lock(&(ifq)->ifq_lock); \
if (((ior) = (io_req_t)dequeue_head(&(ifq)->ifq_head)) != 0) \
(ifq)->ifq_len--; \
simple_unlock(&(ifq)->ifq_lock); \
-}
+MACRO_END
#define IFQ_MAXLEN 50
-#define IFQ_INIT(ifq) { \
+#define IFQ_INIT(ifq) \
+MACRO_BEGIN \
queue_init(&(ifq)->ifq_head); \
simple_lock_init(&(ifq)->ifq_lock); \
(ifq)->ifq_len = 0; \
(ifq)->ifq_maxlen = IFQ_MAXLEN; \
(ifq)->ifq_drops = 0; \
-}
+MACRO_END
#define IFNET_SLOWHZ 1 /* granularity is 1 second */
diff --git a/device/net_io.c b/device/net_io.c
index efde9d6c..01c6a707 100644
--- a/device/net_io.c
+++ b/device/net_io.c
@@ -352,10 +352,11 @@ def_simple_lock_data(static,net_hash_header_lock)
#define FILTER_ITERATE_END }
/* entry_p must be net_rcv_port_t or net_hash_entry_t */
-#define ENQUEUE_DEAD(dead, entry_p, chain) { \
+#define ENQUEUE_DEAD(dead, entry_p, chain) \
+MACRO_BEGIN \
(entry_p)->chain.next = (queue_entry_t) (dead); \
(dead) = (queue_entry_t)(entry_p); \
-}
+MACRO_END
/*
* ethernet_priority:
diff --git a/i386/i386/db_disasm.c b/i386/i386/db_disasm.c
index 303b4621..72d69740 100644
--- a/i386/i386/db_disasm.c
+++ b/i386/i386/db_disasm.c
@@ -857,8 +857,10 @@ int db_lengths[] = {
};
#define get_value_inc(result, loc, size, is_signed, task) \
+MACRO_BEGIN \
result = db_get_task_value((loc), (size), (is_signed), (task)); \
- (loc) += (size);
+ (loc) += (size); \
+MACRO_END
/*
* Read address at location and return updated location.
diff --git a/i386/i386/fpu.c b/i386/i386/fpu.c
index 5bbad541..967ba91d 100644
--- a/i386/i386/fpu.c
+++ b/i386/i386/fpu.c
@@ -59,12 +59,12 @@
#if 0
#include <i386/ipl.h>
#define ASSERT_IPL(L) \
-{ \
+MACRO_BEGIN \
if (curr_ipl[cpu_number()] != L) { \
printf("IPL is %d, expected %d\n", curr_ipl[cpu_number()], L); \
panic("fpu: wrong ipl"); \
} \
-}
+MACRO_END
#else
#define ASSERT_IPL(L)
#endif
@@ -91,16 +91,16 @@ volatile thread_t fp_intr_thread = THREAD_NULL;
#define clear_fpu() \
- { \
+ MACRO_BEGIN \
set_ts(); \
fp_thread = THREAD_NULL; \
- }
+ MACRO_END
#else /* NCPUS > 1 */
#define clear_fpu() \
- { \
+ MACRO_BEGIN \
set_ts(); \
- }
+ MACRO_END
#endif
diff --git a/i386/i386/fpu.h b/i386/i386/fpu.h
index 83a8f2d6..df347a26 100644
--- a/i386/i386/fpu.h
+++ b/i386/i386/fpu.h
@@ -200,7 +200,7 @@ static inline void set_xcr0(uint64_t value) {
*/
#if NCPUS > 1
#define fpu_save_context(thread) \
- { \
+ MACRO_BEGIN \
struct i386_fpsave_state *ifps; \
ifps = (thread)->pcb->ims.ifps; \
if (ifps != 0 && !ifps->fp_valid) { \
@@ -208,13 +208,13 @@ static inline void set_xcr0(uint64_t value) {
fpu_save(ifps); \
set_ts(); \
} \
- }
+ MACRO_END
#else /* NCPUS == 1 */
#define fpu_save_context(thread) \
- { \
+ MACRO_BEGIN \
set_ts(); \
- }
+ MACRO_END
#endif /* NCPUS == 1 */
diff --git a/i386/i386/pio.h b/i386/i386/pio.h
index c488fbb2..1b473d0b 100644
--- a/i386/i386/pio.h
+++ b/i386/i386/pio.h
@@ -48,14 +48,14 @@
#define outl(x, y) \
-{ asm volatile("outl %0, %1" : : "a" ((unsigned int)(y)) , "dN" ((unsigned short)(x))); }
+MACRO_BEGIN asm volatile("outl %0, %1" : : "a" ((unsigned int)(y)) , "dN" ((unsigned short)(x))); MACRO_END
#define outw(x, y) \
-{ asm volatile("outw %0, %1" : : "a" ((unsigned short)(y)) , "dN" ((unsigned short)(x))); }
+MACRO_BEGIN asm volatile("outw %0, %1" : : "a" ((unsigned short)(y)) , "dN" ((unsigned short)(x))); MACRO_END
#define outb(x, y) \
-{ asm volatile("outb %0, %1" : : "a" ((unsigned char)(y)) , "dN" ((unsigned short)(x))); }
+MACRO_BEGIN asm volatile("outb %0, %1" : : "a" ((unsigned char)(y)) , "dN" ((unsigned short)(x))); MACRO_END
#endif /* _I386_PIO_H_ */
diff --git a/i386/i386/proc_reg.h b/i386/i386/proc_reg.h
index 704676cf..b7245550 100644
--- a/i386/i386/proc_reg.h
+++ b/i386/i386/proc_reg.h
@@ -383,22 +383,22 @@ extern unsigned long cr3;
* use them :/ */
#ifdef __x86_64__
#define cpuid(eax, ebx, ecx, edx) \
-{ \
+MACRO_BEGIN \
uint64_t sav_rbx; \
asm( "mov %%rbx,%2\n\t" \
"cpuid\n\t" \
"xchg %2,%%rbx\n\t" \
"movl %k2,%1\n\t" \
: "+a" (eax), "=m" (ebx), "=&r" (sav_rbx), "+c" (ecx), "=&d" (edx)); \
-}
+MACRO_END
#else
#define cpuid(eax, ebx, ecx, edx) \
-{ \
+MACRO_BEGIN \
asm ( "mov %%ebx,%1\n\t" \
"cpuid\n\t" \
"xchg %%ebx,%1\n\t" \
: "+a" (eax), "=&SD" (ebx), "+c" (ecx), "=&d" (edx)); \
-}
+MACRO_END
#endif
#endif /* __GNUC__ */
diff --git a/i386/i386at/rtc.h b/i386/i386at/rtc.h
index 53797221..5f43ba12 100644
--- a/i386/i386at/rtc.h
+++ b/i386/i386at/rtc.h
@@ -116,26 +116,26 @@ struct rtc_st {
* this macro reads contents of real time clock to specified buffer
*/
#define load_rtc(regs) \
-{\
+MACRO_BEGIN \
int i; \
\
for (i = 0; i < RTC_NREG; i++) { \
outb(RTC_ADDR, i); \
regs[i] = inb(RTC_DATA); \
} \
-}
+MACRO_END
/*
* this macro writes contents of specified buffer to real time clock
*/
#define save_rtc(regs) \
-{ \
+MACRO_BEGIN \
int i; \
for (i = 0; i < RTC_NREGP; i++) { \
outb(RTC_ADDR, i); \
outb(RTC_DATA, regs[i]);\
} \
-}
+MACRO_END
extern int readtodc(uint64_t *tp);
extern int writetodc(void);
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index b496353c..49ffd579 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -128,20 +128,22 @@ pv_entry_t pv_head_table; /* array of entries, one per page */
pv_entry_t pv_free_list; /* free list at SPLVM */
def_simple_lock_data(static, pv_free_list_lock)
-#define PV_ALLOC(pv_e) { \
+#define PV_ALLOC(pv_e) \
+MACRO_BEGIN \
simple_lock(&pv_free_list_lock); \
if ((pv_e = pv_free_list) != 0) { \
pv_free_list = pv_e->next; \
} \
simple_unlock(&pv_free_list_lock); \
-}
+MACRO_END
-#define PV_FREE(pv_e) { \
+#define PV_FREE(pv_e) \
+MACRO_BEGIN \
simple_lock(&pv_free_list_lock); \
pv_e->next = pv_free_list; \
pv_free_list = pv_e; \
simple_unlock(&pv_free_list_lock); \
-}
+MACRO_END
struct kmem_cache pv_list_cache; /* cache of pv_entry structures */
@@ -242,54 +244,61 @@ vm_object_t pmap_object = VM_OBJECT_NULL;
* interrupts during pmap operations. We must take the CPU out of
* the cpus_active set while interrupts are blocked.
*/
-#define SPLVM(spl) { \
+#define SPLVM(spl) \
+MACRO_BEGIN \
spl = splvm(); \
i_bit_clear(cpu_number(), &cpus_active); \
-}
+MACRO_END
-#define SPLX(spl) { \
+#define SPLX(spl) \
+MACRO_BEGIN \
i_bit_set(cpu_number(), &cpus_active); \
splx(spl); \
-}
+MACRO_END
/*
* Lock on pmap system
*/
lock_data_t pmap_system_lock;
-#define PMAP_READ_LOCK(pmap, spl) { \
+#define PMAP_READ_LOCK(pmap, spl) \
+MACRO_BEGIN \
SPLVM(spl); \
lock_read(&pmap_system_lock); \
simple_lock(&(pmap)->lock); \
-}
+MACRO_END
-#define PMAP_WRITE_LOCK(spl) { \
+#define PMAP_WRITE_LOCK(spl) \
+MACRO_BEGIN \
SPLVM(spl); \
lock_write(&pmap_system_lock); \
-}
+MACRO_END
-#define PMAP_READ_UNLOCK(pmap, spl) { \
+#define PMAP_READ_UNLOCK(pmap, spl) \
+MACRO_BEGIN \
simple_unlock(&(pmap)->lock); \
lock_read_done(&pmap_system_lock); \
SPLX(spl); \
-}
+MACRO_END
-#define PMAP_WRITE_UNLOCK(spl) { \
+#define PMAP_WRITE_UNLOCK(spl) \
+MACRO_BEGIN \
lock_write_done(&pmap_system_lock); \
SPLX(spl); \
-}
+MACRO_END
-#define PMAP_WRITE_TO_READ_LOCK(pmap) { \
+#define PMAP_WRITE_TO_READ_LOCK(pmap) \
+MACRO_BEGIN \
simple_lock(&(pmap)->lock); \
lock_write_to_read(&pmap_system_lock); \
-}
+MACRO_END
#define LOCK_PVH(index) (lock_pvh_pai(index))
#define UNLOCK_PVH(index) (unlock_pvh_pai(index))
#define PMAP_UPDATE_TLBS(pmap, s, e) \
-{ \
+MACRO_BEGIN \
cpu_set cpu_mask = 1 << cpu_number(); \
cpu_set users; \
\
@@ -310,7 +319,7 @@ lock_data_t pmap_system_lock;
if ((pmap)->cpus_using & cpu_mask) { \
INVALIDATE_TLB((pmap), (s), (e)); \
} \
-}
+MACRO_END
#else /* NCPUS > 1 */
@@ -326,33 +335,36 @@ lock_data_t pmap_system_lock;
#define LOCK_PVH(index)
#define UNLOCK_PVH(index)
-#define PMAP_UPDATE_TLBS(pmap, s, e) { \
+#define PMAP_UPDATE_TLBS(pmap, s, e) \
+MACRO_BEGIN \
/* invalidate our own TLB if pmap is in use */ \
if ((pmap)->cpus_using) { \
INVALIDATE_TLB((pmap), (s), (e)); \
} \
-}
+MACRO_END
#endif /* NCPUS > 1 */
#ifdef MACH_PV_PAGETABLES
-#define INVALIDATE_TLB(pmap, s, e) do { \
+#define INVALIDATE_TLB(pmap, s, e) \
+MACRO_BEGIN \
if (__builtin_constant_p((e) - (s)) \
&& (e) - (s) == PAGE_SIZE) \
hyp_invlpg((pmap) == kernel_pmap ? kvtolin(s) : (s)); \
else \
hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL); \
-} while(0)
+MACRO_END
#else /* MACH_PV_PAGETABLES */
/* It is hard to know when a TLB flush becomes less expensive than a bunch of
* invlpgs. But it surely is more expensive than just one invlpg. */
-#define INVALIDATE_TLB(pmap, s, e) do { \
+#define INVALIDATE_TLB(pmap, s, e) \
+MACRO_BEGIN \
if (__builtin_constant_p((e) - (s)) \
&& (e) - (s) == PAGE_SIZE) \
invlpg_linear((pmap) == kernel_pmap ? kvtolin(s) : (s)); \
else \
flush_tlb(); \
-} while (0)
+MACRO_END
#endif /* MACH_PV_PAGETABLES */
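Both INVALIDATE_TLB variants keep the same compile-time dispatch: __builtin_constant_p((e) - (s)) lets the single-page invlpg (or hyp_invlpg) path be taken only when the caller passed a range whose size folds to the constant PAGE_SIZE; any runtime-sized range gets the full flush. A standalone sketch of that shape, with hypothetical helper names (with GCC, the cheap branch is only selected once optimization folds the subtraction to a constant):

    #include <stdio.h>

    #define PAGE_SIZE 4096

    static void invalidate_page(unsigned long va) { printf("invlpg %#lx\n", va); }
    static void flush_all(void)                   { printf("full TLB flush\n"); }

    /* Compile-time dispatch: a literally one-page range takes the cheap
     * path; a runtime-sized range (even if it happens to equal one page
     * at run time) falls through to the full flush. */
    #define INVALIDATE(s, e) \
    do { \
    	if (__builtin_constant_p((e) - (s)) && (e) - (s) == PAGE_SIZE) \
    		invalidate_page(s); \
    	else \
    		flush_all(); \
    } while (0)

    int main(void)
    {
    	unsigned long va = 0x100000, n = PAGE_SIZE;

    	INVALIDATE(va, va + PAGE_SIZE);	/* size folds to a constant: invlpg */
    	INVALIDATE(va, va + n);		/* size known only at run time: flush */
    	return 0;
    }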
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index 8b0eba0d..becdbe4c 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -317,7 +317,8 @@ pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
* fields to control TLB invalidation on other CPUS.
*/
-#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
+#define PMAP_ACTIVATE_KERNEL(my_cpu) \
+MACRO_BEGIN \
\
/* \
* Let pmap updates proceed while we wait for this pmap. \
@@ -348,17 +349,19 @@ pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
i_bit_set((my_cpu), &cpus_active); \
\
simple_unlock(&kernel_pmap->lock); \
-}
+MACRO_END
-#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
+#define PMAP_DEACTIVATE_KERNEL(my_cpu) \
+MACRO_BEGIN \
/* \
* Mark pmap no longer in use by this cpu even if \
* pmap is locked against updates. \
*/ \
i_bit_clear((my_cpu), &kernel_pmap->cpus_using); \
-}
+MACRO_END
-#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
+#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) \
+MACRO_BEGIN \
pmap_t tpmap = (pmap); \
\
if (tpmap == kernel_pmap) { \
@@ -398,9 +401,10 @@ pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
\
simple_unlock(&tpmap->lock); \
} \
-}
+MACRO_END
-#define PMAP_DEACTIVATE_USER(pmap, thread, my_cpu) { \
+#define PMAP_DEACTIVATE_USER(pmap, thread, my_cpu) \
+MACRO_BEGIN \
pmap_t tpmap = (pmap); \
\
/* \
@@ -413,9 +417,10 @@ pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
*/ \
i_bit_clear((my_cpu), &(pmap)->cpus_using); \
} \
-}
+MACRO_END
-#define MARK_CPU_IDLE(my_cpu) { \
+#define MARK_CPU_IDLE(my_cpu) \
+MACRO_BEGIN \
/* \
* Mark this cpu idle, and remove it from the active set, \
* since it is not actively using any pmap. Signal_cpus \
@@ -427,9 +432,10 @@ pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
i_bit_set((my_cpu), &cpus_idle); \
i_bit_clear((my_cpu), &cpus_active); \
splx(s); \
-}
+MACRO_END
-#define MARK_CPU_ACTIVE(my_cpu) { \
+#define MARK_CPU_ACTIVE(my_cpu) \
+MACRO_BEGIN \
\
int s = splvm(); \
/* \
@@ -453,7 +459,7 @@ pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
*/ \
i_bit_set((my_cpu), &cpus_active); \
splx(s); \
-}
+MACRO_END
#else /* NCPUS > 1 */
@@ -462,17 +468,20 @@ pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
* in use.
*/
-#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
+#define PMAP_ACTIVATE_KERNEL(my_cpu) \
+MACRO_BEGIN \
(void) (my_cpu); \
kernel_pmap->cpus_using = TRUE; \
-}
+MACRO_END
-#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
+#define PMAP_DEACTIVATE_KERNEL(my_cpu) \
+MACRO_BEGIN \
(void) (my_cpu); \
kernel_pmap->cpus_using = FALSE; \
-}
+MACRO_END
-#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
+#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) \
+MACRO_BEGIN \
pmap_t tpmap = (pmap); \
(void) (th); \
(void) (my_cpu); \
@@ -481,14 +490,15 @@ pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
if (tpmap != kernel_pmap) { \
tpmap->cpus_using = TRUE; \
} \
-}
+MACRO_END
-#define PMAP_DEACTIVATE_USER(pmap, thread, cpu) { \
+#define PMAP_DEACTIVATE_USER(pmap, thread, cpu) \
+MACRO_BEGIN \
(void) (thread); \
(void) (cpu); \
if ((pmap) != kernel_pmap) \
(pmap)->cpus_using = FALSE; \
-}
+MACRO_END
#endif /* NCPUS > 1 */
diff --git a/include/device/bpf.h b/include/device/bpf.h
index abc2d777..0f80318e 100644
--- a/include/device/bpf.h
+++ b/include/device/bpf.h
@@ -201,28 +201,31 @@ typedef struct bpf_insn *bpf_insn_t;
#define BPF_JUMP(code, k, jt, jf) { (unsigned short)(code), jt, jf, k }
#define BPF_RETMATCH(code, k, nkey) { (unsigned short)(code), nkey, 0, k }
-#define BPF_INSN_STMT(pc, c, n) {\
+#define BPF_INSN_STMT(pc, c, n) \
+MACRO_BEGIN \
(pc)->code = (c); \
(pc)->jt = (pc)->jf = 0; \
(pc)->k = (n); \
(pc)++; \
-}
+MACRO_END
-#define BPF_INSN_JUMP(pc, c, n, jtrue, jfalse) {\
+#define BPF_INSN_JUMP(pc, c, n, jtrue, jfalse) \
+MACRO_BEGIN \
(pc)->code = (c); \
(pc)->jt = (jtrue); \
(pc)->jf = (jfalse); \
(pc)->k = (n); \
(pc)++; \
-}
+MACRO_END
-#define BPF_INSN_RETMATCH(pc, c, n, nkey) {\
+#define BPF_INSN_RETMATCH(pc, c, n, nkey) \
+MACRO_BEGIN \
(pc)->code = (c); \
(pc)->jt = (nkey); \
(pc)->jf = 0; \
(pc)->k = (n); \
(pc)++; \
-}
+MACRO_END
/*
* Number of scratch memory words (for BPF_LD|BPF_MEM and BPF_ST).
diff --git a/include/mach/profil.h b/include/mach/profil.h
index 866f267b..aa92bc4d 100644
--- a/include/mach/profil.h
+++ b/include/mach/profil.h
@@ -96,7 +96,7 @@ typedef struct buffer *buffer_t;
extern vm_map_t kernel_map;
#define dealloc_pbuf_area(pbuf) \
- { \
+ MACRO_BEGIN \
register int i; \
\
for(i=0; i < NB_PROF_BUFFER ; i++) \
@@ -106,10 +106,11 @@ extern vm_map_t kernel_map;
kmem_free(kernel_map, \
(vm_offset_t)(pbuf), \
sizeof(struct prof_data)); \
- }
+ MACRO_END
#define alloc_pbuf_area(pbuf, vmpbuf) \
+MACRO_BEGIN \
(vmpbuf) = (vm_offset_t) 0; \
if (kmem_alloc(kernel_map, &(vmpbuf) , sizeof(struct prof_data)) == \
KERN_SUCCESS) { \
@@ -130,7 +131,8 @@ extern vm_map_t kernel_map;
} \
} \
else \
- (pbuf) = NULLPBUF;
+ (pbuf) = NULLPBUF; \
+MACRO_END
@@ -146,7 +148,7 @@ extern vm_map_t kernel_map;
*/
#define set_pbuf_value(pbuf, val) \
- { \
+ MACRO_BEGIN \
register buffer_t a = &((pbuf)->prof_area[(pbuf)->prof_index]); \
register int i = a->p_index++; \
register boolean_t f = a->p_full; \
@@ -162,16 +164,16 @@ extern vm_map_t kernel_map;
else \
*(val) = 1; \
} \
- }
+ MACRO_END
#define reset_pbuf_area(pbuf) \
- { \
+ MACRO_BEGIN \
register int *i = &((pbuf)->prof_index); \
\
*i = (*i == NB_PROF_BUFFER-1) ? 0 : ++(*i); \
(pbuf)->prof_area[*i].p_index = 0; \
- }
+ MACRO_END
/**************************************************************/
diff --git a/include/mach/time_value.h b/include/mach/time_value.h
index 7378c2cf..0c89f4eb 100644
--- a/include/mach/time_value.h
+++ b/include/mach/time_value.h
@@ -93,7 +93,8 @@ static __inline__ time_value_t convert_time_value_from_user(rpc_time_value_t tv)
#define time_value64_assert(val) \
assert(0 <= (val)->nanoseconds && (val)->nanoseconds < TIME_NANOS_MAX);
-#define time_value_add_usec(val, micros) { \
+#define time_value_add_usec(val, micros) \
+MACRO_BEGIN \
time_value_assert(val); \
if (((val)->microseconds += (micros)) \
>= TIME_MICROS_MAX) { \
@@ -101,9 +102,10 @@ static __inline__ time_value_t convert_time_value_from_user(rpc_time_value_t tv)
(val)->seconds++; \
} \
time_value_assert(val); \
-}
+MACRO_END
-#define time_value64_add_nanos(val, nanos) { \
+#define time_value64_add_nanos(val, nanos) \
+MACRO_BEGIN \
time_value64_assert(val); \
if (((val)->nanoseconds += (nanos)) \
>= TIME_NANOS_MAX) { \
@@ -111,49 +113,56 @@ static __inline__ time_value_t convert_time_value_from_user(rpc_time_value_t tv)
(val)->seconds++; \
} \
time_value64_assert(val); \
-}
+MACRO_END
-#define time_value64_sub_nanos(val, nanos) { \
+#define time_value64_sub_nanos(val, nanos) \
+MACRO_BEGIN \
time_value64_assert(val); \
if (((val)->nanoseconds -= (nanos)) < 0) { \
(val)->nanoseconds += TIME_NANOS_MAX; \
(val)->seconds--; \
} \
- time_value64_assert(val); \
-}
+ time_value64_assert(val); \
+MACRO_END
-#define time_value_add(result, addend) { \
+#define time_value_add(result, addend) \
+MACRO_BEGIN \
time_value_assert(addend); \
(result)->seconds += (addend)->seconds; \
time_value_add_usec(result, (addend)->microseconds); \
- }
+MACRO_END
-#define time_value64_add(result, addend) { \
+#define time_value64_add(result, addend) \
+MACRO_BEGIN \
time_value64_assert(addend); \
(result)->seconds += (addend)->seconds; \
time_value64_add_nanos(result, (addend)->nanoseconds); \
- }
+MACRO_END
-#define time_value64_sub(result, subtrahend) { \
+#define time_value64_sub(result, subtrahend) \
+MACRO_BEGIN \
time_value64_assert(subtrahend); \
(result)->seconds -= (subtrahend)->seconds; \
time_value64_sub_nanos(result, (subtrahend)->nanoseconds); \
- }
+MACRO_END
-#define time_value64_init(tv) { \
+#define time_value64_init(tv) \
+MACRO_BEGIN \
(tv)->seconds = 0; \
(tv)->nanoseconds = 0; \
- }
+MACRO_END
-#define TIME_VALUE64_TO_TIME_VALUE(tv64, tv) do { \
+#define TIME_VALUE64_TO_TIME_VALUE(tv64, tv) \
+MACRO_BEGIN \
(tv)->seconds = (tv64)->seconds; \
(tv)->microseconds = (tv64)->nanoseconds / 1000; \
-} while(0)
+MACRO_END
-#define TIME_VALUE_TO_TIME_VALUE64(tv, tv64) do { \
+#define TIME_VALUE_TO_TIME_VALUE64(tv, tv64) \
+MACRO_BEGIN \
(tv64)->seconds = (tv)->seconds; \
(tv64)->nanoseconds = (tv)->microseconds * 1000; \
-} while(0)
+MACRO_END
/*
* Time value available through the mapped-time interface.
@@ -178,26 +187,30 @@ typedef struct mapped_time_value {
/* Macros for converting between struct timespec and time_value_t. */
-#define TIME_VALUE_TO_TIMESPEC(tv, ts) do { \
+#define TIME_VALUE_TO_TIMESPEC(tv, ts) \
+MACRO_BEGIN \
(ts)->tv_sec = (tv)->seconds; \
(ts)->tv_nsec = (tv)->microseconds * 1000; \
-} while(0)
+MACRO_END
-#define TIMESPEC_TO_TIME_VALUE(tv, ts) do { \
+#define TIMESPEC_TO_TIME_VALUE(tv, ts) \
+MACRO_BEGIN \
(tv)->seconds = (ts)->tv_sec; \
(tv)->microseconds = (ts)->tv_nsec / 1000; \
-} while(0)
+MACRO_END
/* Macros for converting between struct timespec and time_value64_t. */
-#define TIME_VALUE64_TO_TIMESPEC(tv, ts) do { \
+#define TIME_VALUE64_TO_TIMESPEC(tv, ts) \
+MACRO_BEGIN \
(ts)->tv_sec = (tv)->seconds; \
(ts)->tv_nsec = (tv)->nanoseconds; \
-} while(0)
+MACRO_END
-#define TIMESPEC_TO_TIME_VALUE64(tv, ts) do { \
+#define TIMESPEC_TO_TIME_VALUE64(tv, ts) \
+MACRO_BEGIN \
(tv)->seconds = (ts)->tv_sec; \
(tv)->nanoseconds = (ts)->tv_nsec; \
-} while(0)
+MACRO_END
#endif /* _MACH_TIME_VALUE_H_ */
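The time_value macros are behavior-preserving rewrites; note that the add/sub helpers normalize with a single carry or borrow, which is only sufficient while the increment stays below one full second. A standalone rendering of the time_value64_add_nanos carry, assuming TIME_NANOS_MAX is 10^9 and using an illustrative 64-bit struct layout rather than the Mach type:

    #include <assert.h>
    #include <stdint.h>

    #define TIME_NANOS_MAX 1000000000	/* assumed: one second in nanoseconds */

    struct tv64 { int64_t seconds; int64_t nanoseconds; };	/* illustrative layout */

    /* Mirrors time_value64_add_nanos: add, then carry at most once, so the
     * caller must pass nanos in [0, TIME_NANOS_MAX) for the result to stay
     * normalized -- exactly the invariant time_value64_assert checks. */
    static void add_nanos(struct tv64 *val, int64_t nanos)
    {
    	assert(0 <= nanos && nanos < TIME_NANOS_MAX);
    	val->nanoseconds += nanos;
    	if (val->nanoseconds >= TIME_NANOS_MAX) {
    		val->nanoseconds -= TIME_NANOS_MAX;
    		val->seconds++;
    	}
    	assert(0 <= val->nanoseconds && val->nanoseconds < TIME_NANOS_MAX);
    }

    int main(void)
    {
    	struct tv64 tv = { 1, 999999999 };
    	add_nanos(&tv, 2);	/* 1.999999999 s + 2 ns carries to 2.000000001 s */
    	assert(tv.seconds == 2 && tv.nanoseconds == 1);
    	return 0;
    }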
diff --git a/kern/lock.h b/kern/lock.h
index 6c976eab..704118f0 100644
--- a/kern/lock.h
+++ b/kern/lock.h
@@ -267,18 +267,20 @@ extern unsigned long in_interrupt[NCPUS];
/* These are defined elsewhere with lock monitoring */
#if MACH_LOCK_MON == 0
-#define simple_lock(l) do { \
+#define simple_lock(l) \
+MACRO_BEGIN \
lock_check_no_interrupts(); \
simple_lock_nocheck(l); \
-} while (0)
+MACRO_END
#define simple_lock_try(l) ({ \
lock_check_no_interrupts(); \
simple_lock_try_nocheck(l); \
})
-#define simple_unlock(l) do { \
+#define simple_unlock(l) \
+MACRO_BEGIN \
lock_check_no_interrupts(); \
simple_unlock_nocheck(l); \
-} while (0)
+MACRO_END
#endif
/* _irq variants */
@@ -302,10 +304,11 @@ class simple_lock_irq_data_t name;
simple_lock_nocheck(&(l)->slock); \
__s; \
})
-#define simple_unlock_irq(s, l) do { \
+#define simple_unlock_irq(s, l) \
+MACRO_BEGIN \
simple_unlock_nocheck(&(l)->slock); \
splx(s); \
-} while (0)
+MACRO_END
#if MACH_KDB
extern void db_show_all_slocks(void);
diff --git a/kern/processor.h b/kern/processor.h
index c90d0529..3bb4dfbf 100644
--- a/kern/processor.h
+++ b/kern/processor.h
@@ -90,14 +90,16 @@ extern struct processor_set *slave_pset;
#endif
#ifdef MACH_LDEBUG
-#define pset_idle_lock() do { \
+#define pset_idle_lock() \
+MACRO_BEGIN \
assert_splsched(); \
simple_lock_nocheck(&pset->idle_lock); \
-} while (0)
-#define pset_idle_unlock() do { \
+MACRO_END
+#define pset_idle_unlock() \
+MACRO_BEGIN \
assert_splsched(); \
simple_unlock_nocheck(&pset->idle_lock); \
-} while (0)
+MACRO_END
#else
#define pset_idle_lock() simple_lock_nocheck(&pset->idle_lock)
#define pset_idle_unlock() simple_unlock_nocheck(&pset->idle_lock)
diff --git a/kern/queue.h b/kern/queue.h
index f0b4002f..b2cd3073 100644
--- a/kern/queue.h
+++ b/kern/queue.h
@@ -187,7 +187,7 @@ void insque(queue_entry_t, queue_entry_t);
* <field> is the chain field in (*<type>)
*/
#define queue_enter(head, elt, type, field) \
-{ \
+MACRO_BEGIN \
queue_assert(head); \
queue_assert(&(elt)->field); \
queue_entry_t prev; \
@@ -202,7 +202,7 @@ void insque(queue_entry_t, queue_entry_t);
(elt)->field.prev = prev; \
(elt)->field.next = head; \
(head)->prev = (queue_entry_t) elt; \
-}
+MACRO_END
/*
* Macro: queue_enter_first
@@ -216,7 +216,7 @@ void insque(queue_entry_t, queue_entry_t);
* <field> is the chain field in (*<type>)
*/
#define queue_enter_first(head, elt, type, field) \
-{ \
+MACRO_BEGIN \
queue_assert(head); \
queue_assert(&(elt)->field); \
queue_entry_t next; \
@@ -231,7 +231,7 @@ void insque(queue_entry_t, queue_entry_t);
(elt)->field.next = next; \
(elt)->field.prev = head; \
(head)->next = (queue_entry_t) elt; \
-}
+MACRO_END
/*
* Macro: queue_field [internal use only]
@@ -251,7 +251,7 @@ void insque(queue_entry_t, queue_entry_t);
* arguments as in queue_enter
*/
#define queue_remove(head, elt, type, field) \
-{ \
+MACRO_BEGIN \
queue_assert(head); \
queue_assert(&(elt)->field); \
queue_entry_t next, prev; \
@@ -268,7 +268,7 @@ void insque(queue_entry_t, queue_entry_t);
(head)->next = next; \
else \
((type)prev)->field.next = next; \
-}
+MACRO_END
/*
* Macro: queue_remove_first
@@ -280,7 +280,7 @@ void insque(queue_entry_t, queue_entry_t);
* entry is returned by reference
*/
#define queue_remove_first(head, entry, type, field) \
-{ \
+MACRO_BEGIN \
queue_assert(head); \
queue_assert(&(entry)->field); \
queue_entry_t next; \
@@ -293,7 +293,7 @@ void insque(queue_entry_t, queue_entry_t);
else \
((type)(next))->field.prev = (head); \
(head)->next = next; \
-}
+MACRO_END
/*
* Macro: queue_remove_last
@@ -305,7 +305,7 @@ void insque(queue_entry_t, queue_entry_t);
* entry is returned by reference
*/
#define queue_remove_last(head, entry, type, field) \
-{ \
+MACRO_BEGIN \
queue_assert(head); \
queue_assert(&(entry)->field); \
queue_entry_t prev; \
@@ -318,19 +318,19 @@ void insque(queue_entry_t, queue_entry_t);
else \
((type)(prev))->field.next = (head); \
(head)->prev = prev; \
-}
+MACRO_END
/*
* Macro: queue_assign
*/
#define queue_assign(to, from, type, field) \
-{ \
+MACRO_BEGIN \
queue_assert(&(to)->field); \
queue_assert(&(from)->field); \
((type)((from)->prev))->field.next = (to); \
((type)((from)->next))->field.prev = (to); \
*to = *from; \
-}
+MACRO_END
/*
* Macro: queue_iterate
@@ -366,23 +366,27 @@ typedef struct mpqueue_head mpqueue_head_t;
#define round_mpq(size) (size)
#define mpqueue_init(q) \
- { \
+ MACRO_BEGIN \
queue_init(&(q)->head); \
simple_lock_init(&(q)->lock); \
- }
+ MACRO_END
#define mpenqueue_tail(q, elt) \
+ MACRO_BEGIN \
simple_lock(&(q)->lock); \
enqueue_tail(&(q)->head, elt); \
- simple_unlock(&(q)->lock);
+ simple_unlock(&(q)->lock); \
+ MACRO_END
#define mpdequeue_head(q, elt) \
+ MACRO_BEGIN \
simple_lock(&(q)->lock); \
if (queue_empty(&(q)->head)) \
*(elt) = 0; \
else \
*(elt) = dequeue_head(&(q)->head); \
- simple_unlock(&(q)->lock);
+ simple_unlock(&(q)->lock); \
+ MACRO_END
/*
* Old queue stuff, will go away soon.
diff --git a/kern/sched.h b/kern/sched.h
index d7e74d3a..0acbd3ed 100644
--- a/kern/sched.h
+++ b/kern/sched.h
@@ -76,14 +76,16 @@ typedef struct run_queue *run_queue_t;
/* Shall be taken at splsched only */
#ifdef MACH_LDEBUG
-#define runq_lock(rq) do { \
+#define runq_lock(rq) \
+MACRO_BEGIN \
assert_splsched(); \
simple_lock_nocheck(&(rq)->lock); \
-} while (0)
-#define runq_unlock(rq) do { \
+MACRO_END
+#define runq_unlock(rq) \
+MACRO_BEGIN \
assert_splsched(); \
simple_unlock_nocheck(&(rq)->lock); \
-} while (0)
+MACRO_END
#else
#define runq_lock(rq) simple_lock_nocheck(&(rq)->lock)
#define runq_unlock(rq) simple_unlock_nocheck(&(rq)->lock)
diff --git a/kern/sched_prim.c b/kern/sched_prim.c
index 4f719357..bcbfa160 100644
--- a/kern/sched_prim.c
+++ b/kern/sched_prim.c
@@ -133,14 +133,16 @@ decl_simple_lock_data(static, wait_lock[NUMQUEUES]) /* Lock for... */
queue_head_t wait_queue[NUMQUEUES];
#ifdef MACH_LDEBUG
-#define waitq_lock(wl) do { \
+#define waitq_lock(wl) \
+MACRO_BEGIN \
assert_splsched(); \
simple_lock_nocheck(wl); \
-} while (0)
-#define waitq_unlock(wl) do { \
+MACRO_END
+#define waitq_unlock(wl) \
+MACRO_BEGIN \
assert_splsched(); \
simple_unlock_nocheck(wl); \
-} while (0)
+MACRO_END
#else
#define waitq_lock(wl) simple_lock_nocheck(wl)
#define waitq_unlock(wl) simple_unlock_nocheck(wl)
diff --git a/kern/thread.h b/kern/thread.h
index 4c7ef507..0702e1b4 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -406,14 +406,16 @@ extern void thread_unfreeze(
/* Shall be taken at splsched only */
#ifdef MACH_LDEBUG
-#define thread_lock(th) do { \
+#define thread_lock(th) \
+MACRO_BEGIN \
assert_splsched(); \
simple_lock_nocheck(&(th)->lock); \
-} while (0)
-#define thread_unlock(th) do { \
+MACRO_END
+#define thread_unlock(th) \
+MACRO_BEGIN \
assert_splsched(); \
simple_unlock_nocheck(&(th)->lock); \
-} while (0)
+MACRO_END
#else
#define thread_lock(th) simple_lock_nocheck(&(th)->lock)
#define thread_unlock(th) simple_unlock_nocheck(&(th)->lock)
diff --git a/kern/timer.c b/kern/timer.c
index 13dfc207..27137c80 100644
--- a/kern/timer.c
+++ b/kern/timer.c
@@ -378,10 +378,11 @@ static void timer_grab(
} while ( (save)->high != (timer)->high_bits_check);
}
-#define TIMER_TO_TIME_VALUE64(tv, timer) do { \
+#define TIMER_TO_TIME_VALUE64(tv, timer) \
+MACRO_BEGIN \
(tv)->seconds = (timer)->high + (timer)->low / 1000000; \
- (tv)->nanoseconds = (timer)->low % 1000000 * 1000; \
-} while(0);
+ (tv)->nanoseconds = (timer)->low % 1000000 * 1000; \
+MACRO_END
/*
* timer_read reads the value of a timer into a time_value64_t. If the
diff --git a/linux/dev/glue/block.c b/linux/dev/glue/block.c
index 1bd8b0e0..6db77070 100644
--- a/linux/dev/glue/block.c
+++ b/linux/dev/glue/block.c
@@ -1044,13 +1044,13 @@ check:
#define DECL_DATA struct temp_data td
#define INIT_DATA() \
-{ \
+MACRO_BEGIN \
list_init (&td.pages); \
td.inode.i_rdev = bd->dev; \
td.file.f_mode = bd->mode; \
td.file.f_flags = bd->flags; \
current_thread ()->pcb->data = &td; \
-}
+MACRO_END
static io_return_t
device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
diff --git a/linux/dev/include/linux/blk.h b/linux/dev/include/linux/blk.h
index b924a14f..d0674a52 100644
--- a/linux/dev/include/linux/blk.h
+++ b/linux/dev/include/linux/blk.h
@@ -356,6 +356,7 @@ static void (DEVICE_REQUEST)(void);
#endif
#define INIT_REQUEST \
+MACRO_BEGIN \
if (!CURRENT) {\
CLEAR_INTR; \
return; \
@@ -365,7 +366,8 @@ static void (DEVICE_REQUEST)(void);
if (CURRENT->bh) { \
if (!buffer_locked(CURRENT->bh)) \
panic(DEVICE_NAME ": block not locked"); \
- }
+ } \
+MACRO_END
#endif /* !defined(IDE_DRIVER) */
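INIT_REQUEST is a case where the macro body contains bare return statements, and the conversion is still safe: under the usual do { ... } while (0) expansion (and likewise under a GCC statement expression) a return inside the body leaves the function that invoked the macro, which is exactly the early exit these request handlers rely on. A contrived sketch with hypothetical names:

    #include <stdio.h>

    /* Assumed do/while expansion; gnumach's real one lives in kern/macros.h. */
    #define BAIL_IF_EMPTY(req) \
    do { \
    	if ((req) == 0) { \
    		puts("no request queued"); \
    		return; \
    	} \
    } while (0)

    static void handle(const char *req)
    {
    	BAIL_IF_EMPTY(req);	/* returns from handle(), not just the macro body */
    	printf("handling %s\n", req);
    }

    int main(void)
    {
    	handle(0);
    	handle("read block");
    	return 0;
    }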
diff --git a/vm/pmap.h b/vm/pmap.h
index aca9ada8..ef0fe965 100644
--- a/vm/pmap.h
+++ b/vm/pmap.h
@@ -209,17 +209,19 @@ extern vm_offset_t pmap_map_bd(
* Routines defined as macros.
*/
#ifndef PMAP_ACTIVATE_USER
-#define PMAP_ACTIVATE_USER(pmap, thread, cpu) { \
+#define PMAP_ACTIVATE_USER(pmap, thread, cpu) \
+MACRO_BEGIN \
if ((pmap) != kernel_pmap) \
PMAP_ACTIVATE(pmap, thread, cpu); \
-}
+MACRO_END
#endif /* PMAP_ACTIVATE_USER */
#ifndef PMAP_DEACTIVATE_USER
-#define PMAP_DEACTIVATE_USER(pmap, thread, cpu) { \
+#define PMAP_DEACTIVATE_USER(pmap, thread, cpu) \
+MACRO_BEGIN \
if ((pmap) != kernel_pmap) \
PMAP_DEACTIVATE(pmap, thread, cpu); \
-}
+MACRO_END
#endif /* PMAP_DEACTIVATE_USER */
#ifndef PMAP_ACTIVATE_KERNEL
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index b53592f0..c16f5e34 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -1637,33 +1637,37 @@ kern_return_t vm_fault_wire_fast(
*/
#undef RELEASE_PAGE
-#define RELEASE_PAGE(m) { \
+#define RELEASE_PAGE(m) \
+MACRO_BEGIN \
PAGE_WAKEUP_DONE(m); \
vm_page_lock_queues(); \
vm_page_unwire(m); \
vm_page_unlock_queues(); \
-}
+MACRO_END
#undef UNLOCK_THINGS
-#define UNLOCK_THINGS { \
+#define UNLOCK_THINGS \
+MACRO_BEGIN \
object->paging_in_progress--; \
vm_object_unlock(object); \
-}
+MACRO_END
#undef UNLOCK_AND_DEALLOCATE
-#define UNLOCK_AND_DEALLOCATE { \
+#define UNLOCK_AND_DEALLOCATE \
+MACRO_BEGIN \
UNLOCK_THINGS; \
vm_object_deallocate(object); \
-}
+MACRO_END
/*
* Give up and have caller do things the hard way.
*/
-#define GIVE_UP { \
+#define GIVE_UP \
+MACRO_BEGIN \
UNLOCK_AND_DEALLOCATE; \
return(KERN_FAILURE); \
-}
+MACRO_END
/*
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 09f23e3d..2e243b78 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -572,9 +572,11 @@ void vm_map_deallocate(vm_map_t map)
* future lookups. Performs necessary interlocks.
*/
#define SAVE_HINT(map,value) \
+ MACRO_BEGIN \
simple_lock(&(map)->hint_lock); \
(map)->hint = (value); \
- simple_unlock(&(map)->hint_lock);
+ simple_unlock(&(map)->hint_lock); \
+ MACRO_END
/*
* vm_map_lookup_entry: [ internal use only ]
@@ -974,7 +976,10 @@ kern_return_t vm_map_enter(
vm_offset_t end;
kern_return_t result = KERN_SUCCESS;
-#define RETURN(value) { result = value; goto BailOut; }
+#define RETURN(value) \
+MACRO_BEGIN \
+ result = value; goto BailOut; \
+MACRO_END
if (size == 0)
return KERN_INVALID_ARGUMENT;
@@ -1314,14 +1319,14 @@ void _vm_map_clip_end(
* addresses fall within the valid range of the map.
*/
#define VM_MAP_RANGE_CHECK(map, start, end) \
- { \
+ MACRO_BEGIN \
if (start < vm_map_min(map)) \
start = vm_map_min(map); \
if (end > vm_map_max(map)) \
end = vm_map_max(map); \
if (start > end) \
start = end; \
- }
+ MACRO_END
/*
* vm_map_submap: [ kernel use only ]
@@ -4685,11 +4690,11 @@ kern_return_t vm_map_lookup(
vm_map_lock_read(map);
#define RETURN(why) \
- { \
+ MACRO_BEGIN \
if (!(keep_map_locked && (why == KERN_SUCCESS))) \
vm_map_unlock_read(map); \
return(why); \
- }
+ MACRO_END
/*
* If the map has an interesting hint, try it before calling