diff options
author | Richard Braun <rbraun@sceen.net> | 2017-09-01 23:58:41 +0200 |
---|---|---|
committer | Richard Braun <rbraun@sceen.net> | 2017-09-02 15:25:37 +0200 |
commit | d18d0e85596f90e0bd597b33d58209d0b3973c95 (patch) | |
tree | 6c3472f59cf64244ab86d2fc13b220b1c8f61165 | |
parent | 897ad6a062ea2a32a2759613608faf3271211832 (diff) |
Make assert have no side effects
This makes sure that symbols referenced only by assert expressions may
not be generated when they are otherwise unused. The recently introduced
__unused macro is used to suppress the compiler warnings resulting from
this change.
-rw-r--r-- | arch/x86/machine/cga.c | 4 | ||||
-rw-r--r-- | arch/x86/machine/cpu.c | 4 | ||||
-rw-r--r-- | arch/x86/machine/ioapic.c | 2 | ||||
-rw-r--r-- | include/assert.h | 4 | ||||
-rw-r--r-- | kern/cbuf.c | 2 | ||||
-rw-r--r-- | kern/condition.c | 2 | ||||
-rw-r--r-- | kern/kmem.c | 2 | ||||
-rw-r--r-- | kern/log.c | 2 | ||||
-rw-r--r-- | kern/mutex/mutex_adaptive.c | 3 | ||||
-rw-r--r-- | kern/mutex/mutex_plain.c | 3 | ||||
-rw-r--r-- | kern/rtmutex.c | 2 | ||||
-rw-r--r-- | kern/semaphore.c | 3 | ||||
-rw-r--r-- | kern/sleepq.c | 29 | ||||
-rw-r--r-- | kern/spinlock.h | 10 | ||||
-rw-r--r-- | kern/sref.c | 2 | ||||
-rw-r--r-- | kern/task.h | 5 | ||||
-rw-r--r-- | kern/thread.c | 27 | ||||
-rw-r--r-- | kern/thread.h | 3 | ||||
-rw-r--r-- | kern/timer.c | 2 | ||||
-rw-r--r-- | kern/turnstile.c | 30 | ||||
-rw-r--r-- | kern/turnstile.h | 8 | ||||
-rw-r--r-- | vm/vm_kmem.c | 4 | ||||
-rw-r--r-- | vm/vm_page.c | 2 | ||||
-rw-r--r-- | vm/vm_page.h | 2 |
24 files changed, 88 insertions, 69 deletions
diff --git a/arch/x86/machine/cga.c b/arch/x86/machine/cga.c index b519ba1f..d086326d 100644 --- a/arch/x86/machine/cga.c +++ b/arch/x86/machine/cga.c @@ -216,7 +216,7 @@ static void cga_bbuf_redraw(struct cga_bbuf *bbuf) { size_t size; - int error; + __unused int error; size = CGA_MEMORY_SIZE; error = cbuf_read(&bbuf->cbuf, bbuf->view, cga_memory, &size); @@ -298,7 +298,7 @@ cga_bbuf_newline(struct cga_bbuf *bbuf) { uint16_t cursor = 0, spaces[CGA_COLUMNS]; size_t i, nr_spaces, offset, size; - int error; + __unused int error; cga_bbuf_reset_view(bbuf); diff --git a/arch/x86/machine/cpu.c b/arch/x86/machine/cpu.c index 98d3680e..e971d99c 100644 --- a/arch/x86/machine/cpu.c +++ b/arch/x86/machine/cpu.c @@ -466,7 +466,9 @@ cpu_init(struct cpu *cpu) cpu->phys_addr_width = 0; cpu->virt_addr_width = 0; - assert(max_basic >= 1); + if (max_basic == 0) { + panic("cpu: unsupported maximum input value for basic information"); + } eax = 1; cpu_cpuid(&eax, &ebx, &ecx, &edx); diff --git a/arch/x86/machine/ioapic.c b/arch/x86/machine/ioapic.c index 82fbd6d5..d70ec92b 100644 --- a/arch/x86/machine/ioapic.c +++ b/arch/x86/machine/ioapic.c @@ -209,7 +209,7 @@ ioapic_create(unsigned int apic_id, uintptr_t addr, unsigned int gsi_base) return ioapic; } -static bool +__unused static bool ioapic_has_gsi(const struct ioapic *ioapic, unsigned int gsi) { return ((gsi >= ioapic->first_gsi) && (gsi <= ioapic->last_gsi)); diff --git a/include/assert.h b/include/assert.h index 8fdc2fe2..9bf3bdf0 100644 --- a/include/assert.h +++ b/include/assert.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Richard Braun. + * Copyright (c) 2010-2017 Richard Braun. 
* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,7 +21,7 @@ #define static_assert _Static_assert #ifdef NDEBUG -#define assert(expression) ((void)(expression)) +#define assert(expression) #else /* NDEBUG */ #include <kern/macros.h> diff --git a/kern/cbuf.c b/kern/cbuf.c index 2093f428..eca0c33c 100644 --- a/kern/cbuf.c +++ b/kern/cbuf.c @@ -72,7 +72,7 @@ cbuf_push(struct cbuf *cbuf, const void *buf, size_t size, bool erase) int cbuf_pop(struct cbuf *cbuf, void *buf, size_t *sizep) { - int error; + __unused int error; if (cbuf_size(cbuf) == 0) { return ERROR_AGAIN; diff --git a/kern/condition.c b/kern/condition.c index e6d65951..b12068fa 100644 --- a/kern/condition.c +++ b/kern/condition.c @@ -88,7 +88,7 @@ condition_wait_common(struct condition *condition, struct mutex *mutex, void condition_wait(struct condition *condition, struct mutex *mutex) { - int error; + __unused int error; error = condition_wait_common(condition, mutex, false, 0); assert(!error); diff --git a/kern/kmem.c b/kern/kmem.c index d98be0d6..5423c0a0 100644 --- a/kern/kmem.c +++ b/kern/kmem.c @@ -646,7 +646,7 @@ kmem_cache_register(struct kmem_cache *cache, struct kmem_slab *slab) uintptr_t va, end; phys_addr_t pa; bool virtual; - int error; + __unused int error; assert(kmem_cache_registration_required(cache)); assert(slab->nr_refs == 0); @@ -459,7 +459,7 @@ INIT_OP_DEFINE(log_start, static void log_write(const void *s, size_t size) { - int error; + __unused int error; error = cbuf_push(&log_cbuf, s, size, true); assert(!error); diff --git a/kern/mutex/mutex_adaptive.c b/kern/mutex/mutex_adaptive.c index 68b006ad..8da55270 100644 --- a/kern/mutex/mutex_adaptive.c +++ b/kern/mutex/mutex_adaptive.c @@ -24,6 +24,7 @@ #include <kern/clock.h> #include <kern/error.h> #include <kern/init.h> +#include <kern/macros.h> #include <kern/mutex.h> #include <kern/mutex_types.h> #include <kern/sleepq.h> @@ -235,7 
+236,7 @@ out: void mutex_adaptive_lock_slow(struct mutex *mutex) { - int error; + __unused int error; error = mutex_adaptive_lock_slow_common(mutex, false, 0); assert(!error); diff --git a/kern/mutex/mutex_plain.c b/kern/mutex/mutex_plain.c index 2c655940..abfa3396 100644 --- a/kern/mutex/mutex_plain.c +++ b/kern/mutex/mutex_plain.c @@ -22,6 +22,7 @@ #include <kern/atomic.h> #include <kern/init.h> +#include <kern/macros.h> #include <kern/mutex.h> #include <kern/mutex_types.h> #include <kern/sleepq.h> @@ -133,7 +134,7 @@ out: void mutex_plain_lock_slow(struct mutex *mutex) { - int error; + __unused int error; error = mutex_plain_lock_slow_common(mutex, false, 0); assert(!error); diff --git a/kern/rtmutex.c b/kern/rtmutex.c index c07bbfef..09637514 100644 --- a/kern/rtmutex.c +++ b/kern/rtmutex.c @@ -183,7 +183,7 @@ out: void rtmutex_lock_slow(struct rtmutex *rtmutex) { - int error; + __unused int error; error = rtmutex_lock_slow_common(rtmutex, false, 0); assert(!error); diff --git a/kern/semaphore.c b/kern/semaphore.c index 72e843a9..003f053c 100644 --- a/kern/semaphore.c +++ b/kern/semaphore.c @@ -20,6 +20,7 @@ #include <stddef.h> #include <stdint.h> +#include <kern/macros.h> #include <kern/semaphore.h> #include <kern/semaphore_i.h> #include <kern/sleepq.h> @@ -63,7 +64,7 @@ semaphore_wait_slow_common(struct semaphore *semaphore, void semaphore_wait_slow(struct semaphore *semaphore) { - int error; + __unused int error; error = semaphore_wait_slow_common(semaphore, false, 0); assert(!error); diff --git a/kern/sleepq.c b/kern/sleepq.c index 170b1a9f..0c20ae42 100644 --- a/kern/sleepq.c +++ b/kern/sleepq.c @@ -110,14 +110,14 @@ sleepq_waiter_wakeup(struct sleepq_waiter *waiter) thread_wakeup(waiter->thread); } -static void -sleepq_assert_init_state(const struct sleepq *sleepq) +__unused static bool +sleepq_state_initialized(const struct sleepq *sleepq) { - assert(sleepq->bucket == NULL); - assert(sleepq->sync_obj == NULL); - assert(list_empty(&sleepq->waiters)); - 
assert(sleepq->oldest_waiter == NULL); - assert(sleepq->next_free == NULL); + return ((sleepq->bucket == NULL) + && (sleepq->sync_obj == NULL) + && (list_empty(&sleepq->waiters)) + && (sleepq->oldest_waiter == NULL) + && (sleepq->next_free == NULL)); } static void @@ -134,7 +134,7 @@ sleepq_unuse(struct sleepq *sleepq) sleepq->sync_obj = NULL; } -static bool +__unused static bool sleepq_in_use(const struct sleepq *sleepq) { return sleepq->sync_obj != NULL; @@ -186,7 +186,8 @@ sleepq_bucket_add(struct sleepq_bucket *bucket, struct sleepq *sleepq) } static void -sleepq_bucket_remove(struct sleepq_bucket *bucket, struct sleepq *sleepq) +sleepq_bucket_remove(__unused struct sleepq_bucket *bucket, + struct sleepq *sleepq) { assert(sleepq->bucket == bucket); sleepq->bucket = NULL; @@ -253,14 +254,14 @@ sleepq_create(void) return NULL; } - sleepq_assert_init_state(sleepq); + assert(sleepq_state_initialized(sleepq)); return sleepq; } void sleepq_destroy(struct sleepq *sleepq) { - sleepq_assert_init_state(sleepq); + assert(sleepq_state_initialized(sleepq)); kmem_cache_free(&sleepq_cache, sleepq); } @@ -352,7 +353,7 @@ sleepq_lend(const void *sync_obj, bool condition, unsigned long *flags) assert(sync_obj != NULL); sleepq = thread_sleepq_lend(); - sleepq_assert_init_state(sleepq); + assert(sleepq_state_initialized(sleepq)); bucket = sleepq_bucket_get(sync_obj, condition); @@ -390,7 +391,7 @@ sleepq_return(struct sleepq *sleepq, unsigned long flags) spinlock_unlock_intr_restore(&bucket->lock, flags); - sleepq_assert_init_state(free_sleepq); + assert(sleepq_state_initialized(free_sleepq)); thread_sleepq_return(free_sleepq); } @@ -468,7 +469,7 @@ sleepq_wait_common(struct sleepq *sleepq, const char *wchan, void sleepq_wait(struct sleepq *sleepq, const char *wchan) { - int error; + __unused int error; error = sleepq_wait_common(sleepq, wchan, false, 0); assert(!error); diff --git a/kern/spinlock.h b/kern/spinlock.h index d4105da0..50ac5401 100644 --- a/kern/spinlock.h +++ 
b/kern/spinlock.h @@ -26,6 +26,8 @@ #ifndef _KERN_SPINLOCK_H #define _KERN_SPINLOCK_H +#include <stdbool.h> + #include <kern/init.h> #include <kern/macros.h> #include <kern/spinlock_i.h> @@ -35,13 +37,17 @@ struct spinlock; -#define spinlock_assert_locked(lock) assert((lock)->value != SPINLOCK_UNLOCKED) - /* * Initialize a spin lock. */ void spinlock_init(struct spinlock *lock); +static inline bool +spinlock_locked(const struct spinlock *lock) +{ + return lock->value != SPINLOCK_UNLOCKED; +} + /* * Attempt to lock the given spin lock. * diff --git a/kern/sref.c b/kern/sref.c index 3603e781..95daeb0f 100644 --- a/kern/sref.c +++ b/kern/sref.c @@ -242,7 +242,7 @@ sref_queue_concat(struct sref_queue *queue1, struct sref_queue *queue2) queue1->size += queue2->size; } -static inline bool +__unused static inline bool sref_counter_aligned(const struct sref_counter *counter) { return (((uintptr_t)counter & (~SREF_WEAKREF_MASK)) == 0); diff --git a/kern/task.h b/kern/task.h index 1711fbee..149ff49d 100644 --- a/kern/task.h +++ b/kern/task.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Richard Braun. + * Copyright (c) 2012-2017 Richard Braun. 
* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,6 +21,7 @@ #include <kern/atomic.h> #include <kern/init.h> #include <kern/list.h> +#include <kern/macros.h> #include <kern/spinlock.h> #include <kern/thread.h> #include <vm/vm_map.h> @@ -50,7 +51,7 @@ extern struct task *kernel_task; static inline void task_ref(struct task *task) { - unsigned long nr_refs; + __unused unsigned long nr_refs; nr_refs = atomic_fetch_add(&task->nr_refs, 1, ATOMIC_RELAXED); assert(nr_refs != (unsigned long)-1); diff --git a/kern/thread.c b/kern/thread.c index 4a9cb2a1..5d8ac11d 100644 --- a/kern/thread.c +++ b/kern/thread.c @@ -475,7 +475,7 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread) const struct thread_sched_ops *ops; assert(!cpu_intr_enabled()); - spinlock_assert_locked(&runq->lock); + assert(spinlock_locked(&runq->lock)); assert(!thread->in_runq); ops = thread_get_real_sched_ops(thread); @@ -502,7 +502,7 @@ thread_runq_remove(struct thread_runq *runq, struct thread *thread) const struct thread_sched_ops *ops; assert(!cpu_intr_enabled()); - spinlock_assert_locked(&runq->lock); + assert(spinlock_locked(&runq->lock)); assert(thread->in_runq); runq->nr_threads--; @@ -523,7 +523,7 @@ thread_runq_put_prev(struct thread_runq *runq, struct thread *thread) const struct thread_sched_ops *ops; assert(!cpu_intr_enabled()); - spinlock_assert_locked(&runq->lock); + assert(spinlock_locked(&runq->lock)); ops = thread_get_real_sched_ops(thread); @@ -539,7 +539,7 @@ thread_runq_get_next(struct thread_runq *runq) unsigned int i; assert(!cpu_intr_enabled()); - spinlock_assert_locked(&runq->lock); + assert(spinlock_locked(&runq->lock)); for (i = 0; i < ARRAY_SIZE(thread_sched_ops); i++) { thread = thread_sched_ops[i].get_next(runq); @@ -572,7 +572,7 @@ static void thread_runq_wakeup(struct thread_runq *runq, struct thread *thread) { assert(!cpu_intr_enabled()); - 
spinlock_assert_locked(&runq->lock); + assert(spinlock_locked(&runq->lock)); assert(thread->state == THREAD_RUNNING); thread_runq_add(runq, thread); @@ -612,7 +612,7 @@ thread_runq_schedule(struct thread_runq *runq) && (__builtin_frame_address(0) < (prev->stack + TCB_STACK_SIZE))); assert(prev->preempt == THREAD_SUSPEND_PREEMPT_LEVEL); assert(!cpu_intr_enabled()); - spinlock_assert_locked(&runq->lock); + assert(spinlock_locked(&runq->lock)); llsync_report_context_switch(); @@ -657,7 +657,7 @@ thread_runq_schedule(struct thread_runq *runq) assert(prev->preempt == THREAD_SUSPEND_PREEMPT_LEVEL); assert(!cpu_intr_enabled()); - spinlock_assert_locked(&runq->lock); + assert(spinlock_locked(&runq->lock)); return runq; } @@ -762,7 +762,8 @@ thread_sched_rt_get_next(struct thread_runq *runq) } static void -thread_sched_rt_reset_priority(struct thread *thread, unsigned short priority) +thread_sched_rt_reset_priority(struct thread *thread, + __unused unsigned short priority) { assert(priority <= THREAD_SCHED_RT_PRIO_MAX); thread->rt_data.time_slice = THREAD_DEFAULT_RR_TIME_SLICE; @@ -1888,12 +1889,12 @@ thread_unlock_runq(struct thread_runq *runq, unsigned long flags) static void * thread_alloc_stack(void) { - struct vm_page *first_page, *last_page; + __unused struct vm_page *first_page, *last_page; phys_addr_t first_pa, last_pa; size_t stack_size; uintptr_t va; void *mem; - int error; + __unused int error; stack_size = vm_page_round(TCB_STACK_SIZE); mem = vm_kmem_alloc((PAGE_SIZE * 2) + stack_size); @@ -2546,7 +2547,7 @@ void thread_sleep(struct spinlock *interlock, const void *wchan_addr, const char *wchan_desc) { - int error; + __unused int error; error = thread_sleep_common(interlock, wchan_addr, wchan_desc, false, 0); assert(!error); @@ -2776,14 +2777,14 @@ thread_pi_setscheduler(struct thread *thread, unsigned char policy, unsigned short priority) { const struct thread_sched_ops *ops; + __unused struct turnstile_td *td; struct thread_runq *runq; - struct turnstile_td 
*td; unsigned int global_priority; unsigned long flags; bool requeue, current; td = thread_turnstile_td(thread); - turnstile_td_assert_lock(td); + assert(turnstile_td_locked(td)); ops = thread_get_sched_ops(thread_policy_to_class(policy)); global_priority = ops->get_global_priority(priority); diff --git a/kern/thread.h b/kern/thread.h index a3f2670d..1a811685 100644 --- a/kern/thread.h +++ b/kern/thread.h @@ -43,6 +43,7 @@ #include <kern/init.h> #include <kern/condition.h> #include <kern/cpumap.h> +#include <kern/macros.h> #include <kern/spinlock_types.h> #include <kern/turnstile_types.h> #include <machine/cpu.h> @@ -281,7 +282,7 @@ void thread_pi_setscheduler(struct thread *thread, unsigned char policy, static inline void thread_ref(struct thread *thread) { - unsigned long nr_refs; + __unused unsigned long nr_refs; nr_refs = atomic_fetch_add(&thread->nr_refs, 1, ATOMIC_RELAXED); assert(nr_refs != (unsigned long)-1); diff --git a/kern/timer.c b/kern/timer.c index 77a6bb36..57f1765f 100644 --- a/kern/timer.c +++ b/kern/timer.c @@ -135,7 +135,7 @@ timer_unlock_cpu_data(struct timer_cpu_data *cpu_data, unsigned long flags) * Timer state functions. 
*/ -static bool +__unused static bool timer_ready(const struct timer *timer) { return timer->state == TIMER_TS_READY; diff --git a/kern/turnstile.c b/kern/turnstile.c index e59a7f3f..a9b994d5 100644 --- a/kern/turnstile.c +++ b/kern/turnstile.c @@ -401,14 +401,14 @@ turnstile_td_propagate_priority(struct turnstile_td *td) turnstile_td_propagate_priority_loop(td); } -static void -turnstile_assert_init_state(const struct turnstile *turnstile) +__unused static bool +turnstile_state_initialized(const struct turnstile *turnstile) { - assert(turnstile->bucket == NULL); - assert(turnstile->sync_obj == NULL); - assert(plist_empty(&turnstile->waiters)); - assert(turnstile->next_free == NULL); - assert(turnstile->top_waiter == NULL); + return ((turnstile->bucket == NULL) + && (turnstile->sync_obj == NULL) + && (plist_empty(&turnstile->waiters)) + && (turnstile->next_free == NULL) + && (turnstile->top_waiter == NULL)); } static void @@ -425,7 +425,7 @@ turnstile_unuse(struct turnstile *turnstile) turnstile->sync_obj = NULL; } -static bool +__unused static bool turnstile_in_use(const struct turnstile *turnstile) { return turnstile->sync_obj != NULL; @@ -464,7 +464,7 @@ turnstile_bucket_add(struct turnstile_bucket *bucket, } static void -turnstile_bucket_remove(struct turnstile_bucket *bucket, +turnstile_bucket_remove(__unused struct turnstile_bucket *bucket, struct turnstile *turnstile) { assert(turnstile->bucket == bucket); @@ -529,14 +529,14 @@ turnstile_create(void) return NULL; } - turnstile_assert_init_state(turnstile); + assert(turnstile_state_initialized(turnstile)); return turnstile; } void turnstile_destroy(struct turnstile *turnstile) { - turnstile_assert_init_state(turnstile); + assert(turnstile_state_initialized(turnstile)); kmem_cache_free(&turnstile_cache, turnstile); } @@ -603,7 +603,7 @@ turnstile_lend(const void *sync_obj) assert(sync_obj != NULL); turnstile = thread_turnstile_lend(); - turnstile_assert_init_state(turnstile); + 
assert(turnstile_state_initialized(turnstile)); td = thread_turnstile_td(thread_self()); bucket = turnstile_bucket_get(sync_obj); @@ -653,7 +653,7 @@ turnstile_return(struct turnstile *turnstile) spinlock_unlock(&bucket->lock); - turnstile_assert_init_state(free_turnstile); + assert(turnstile_state_initialized(free_turnstile)); thread_turnstile_return(free_turnstile); } @@ -774,7 +774,7 @@ void turnstile_wait(struct turnstile *turnstile, const char *wchan, struct thread *owner) { - int error; + __unused int error; error = turnstile_wait_common(turnstile, wchan, owner, false, 0); assert(!error); @@ -806,7 +806,7 @@ turnstile_own(struct turnstile *turnstile) { struct turnstile_td *td; struct thread *owner; - unsigned int top_priority; + __unused unsigned int top_priority; assert(turnstile->owner == NULL); diff --git a/kern/turnstile.h b/kern/turnstile.h index e7b4a5e3..f7ba0847 100644 --- a/kern/turnstile.h +++ b/kern/turnstile.h @@ -43,8 +43,6 @@ struct turnstile; */ struct turnstile_td; -#define turnstile_td_assert_lock(td) spinlock_assert_locked(&(td)->lock) - /* * Initialize turnstile thread data. */ @@ -62,6 +60,12 @@ turnstile_td_init(struct turnstile_td *td) * Turnstile thread data locking functions. 
*/ +static inline bool +turnstile_td_locked(struct turnstile_td *td) +{ + return spinlock_locked(&(td)->lock); +} + static inline void turnstile_td_lock(struct turnstile_td *td) { diff --git a/vm/vm_kmem.c b/vm/vm_kmem.c index 101c9815..d526804c 100644 --- a/vm/vm_kmem.c +++ b/vm/vm_kmem.c @@ -64,7 +64,7 @@ INIT_OP_DEFINE(vm_kmem_setup, INIT_OP_DEP(vm_object_setup, true), INIT_OP_DEP(vm_page_setup, true)); -static int +__unused static int vm_kmem_alloc_check(size_t size) { if (!vm_page_aligned(size) @@ -75,7 +75,7 @@ vm_kmem_alloc_check(size_t size) return 0; } -static int +__unused static int vm_kmem_free_check(uintptr_t va, size_t size) { if (!vm_page_aligned(va)) { diff --git a/vm/vm_page.c b/vm/vm_page.c index 49c2c11e..19f6f4be 100644 --- a/vm/vm_page.c +++ b/vm/vm_page.c @@ -790,7 +790,7 @@ vm_page_lookup(phys_addr_t pa) return NULL; } -static bool +__unused static bool vm_page_block_referenced(const struct vm_page *page, unsigned int order) { unsigned int i, nr_pages; diff --git a/vm/vm_page.h b/vm/vm_page.h index 70a0091a..e9ca321c 100644 --- a/vm/vm_page.h +++ b/vm/vm_page.h @@ -240,7 +240,7 @@ vm_page_referenced(const struct vm_page *page) static inline void vm_page_ref(struct vm_page *page) { - unsigned int nr_refs; + __unused unsigned int nr_refs; nr_refs = atomic_fetch_add(&page->nr_refs, 1, ATOMIC_RELAXED); assert(nr_refs != (unsigned int)-1); |