/* Enqueue a list of read or write requests, 64bit offset version.
   Copyright (C) 1997, 1998 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper, 1997.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include <aio.h>
#include <errno.h>
#include <stdlib.h>

#include "aio_misc.h"


/* We need this special structure to handle asynchronous I/O.  */
struct async_waitlist
  {
    int counter;
    struct sigevent sigev;
    struct waitlist list[0];
  };


int
lio_listio64 (mode, list, nent, sig)
     int mode;
     struct aiocb64 *const list[];
     int nent;
     struct sigevent *sig;
{
  struct requestlist *requests[nent];
  int cnt;
  volatile int total = 0;
  int result = 0;

  /* Check arguments.  */
  if (mode != LIO_WAIT && mode != LIO_NOWAIT)
    {
      __set_errno (EINVAL);
      return -1;
    }

  /* Request the mutex.  */
  pthread_mutex_lock (&__aio_requests_mutex);

  /* Now we can enqueue all requests.  Since we already acquired the
     mutex the enqueue function need not do this.  */
  for (cnt = 0; cnt < nent; ++cnt)
    if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP)
      {
        requests[cnt] = __aio_enqueue_request ((aiocb_union *) list[cnt],
                                               (list[cnt]->aio_lio_opcode
                                                | 128));

        if (requests[cnt] != NULL)
          /* Successfully enqueued.  */
          ++total;
        else
          /* Signal that we've seen an error.  `errno' and the error code
             of the aiocb will tell more.  */
          result = -1;
      }

  if (total == 0)
    {
      /* We don't have anything to do except signalling if we work
         asynchronously.  */
      if (mode == LIO_NOWAIT)
        __aio_notify_only (sig);
    }
  else if (mode == LIO_WAIT)
    {
      pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
      struct waitlist waitlist[nent];
      int oldstate;

      total = 0;
      for (cnt = 0; cnt < nent; ++cnt)
        if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP
            && requests[cnt] != NULL)
          {
            waitlist[cnt].cond = &cond;
            waitlist[cnt].next = requests[cnt]->waiting;
            waitlist[cnt].counterp = &total;
            waitlist[cnt].sigevp = NULL;
            requests[cnt]->waiting = &waitlist[cnt];
            ++total;
          }

      /* Since `pthread_cond_wait'/`pthread_cond_timedwait' are cancelation
         points we must be careful.  We added entries to the waiting lists
         which we must remove.  So defer cancelation for now.  */
      pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);

      while (total > 0)
        pthread_cond_wait (&cond, &__aio_requests_mutex);

      /* Now it's time to restore the cancelation state.  */
      pthread_setcancelstate (oldstate, NULL);

      /* Release the condition variable.  */
      if (pthread_cond_destroy (&cond) != 0)
        /* This must never happen.  */
        abort ();
    }
  else
    {
      struct async_waitlist *waitlist;

      waitlist = (struct async_waitlist *)
        malloc (sizeof (struct async_waitlist)
                + (nent * sizeof (struct waitlist)));

      if (waitlist == NULL)
        {
          __set_errno (EAGAIN);
          result = -1;
        }
      else
        {
          total = 0;

          for (cnt = 0; cnt < nent; ++cnt)
            if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP
                && requests[cnt] != NULL)
              {
                waitlist->list[cnt].cond = NULL;
                waitlist->list[cnt].next = requests[cnt]->waiting;
                waitlist->list[cnt].counterp = &waitlist->counter;
                waitlist->list[cnt].sigevp = &waitlist->sigev;
                requests[cnt]->waiting = &waitlist->list[cnt];
                ++total;
              }

          waitlist->counter = total;
          waitlist->sigev = *sig;
        }
    }

  /* Release the mutex.  */
  pthread_mutex_unlock (&__aio_requests_mutex);

  return result;
}
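
/* Usage sketch: a caller fills an array of `struct aiocb64' control blocks
   and passes it to `lio_listio64'.  The snippet below is a minimal,
   hypothetical example; `fd' is assumed to be an already-open file
   descriptor, and <aio.h>, <stdio.h> and <string.h> are assumed to be
   included.

       static char buf[4096];
       struct aiocb64 cb;
       struct aiocb64 *ops[1];

       memset (&cb, 0, sizeof cb);
       cb.aio_fildes = fd;
       cb.aio_buf = buf;
       cb.aio_nbytes = sizeof buf;
       cb.aio_offset = 0;
       cb.aio_lio_opcode = LIO_READ;
       ops[0] = &cb;

       if (lio_listio64 (LIO_WAIT, ops, 1, NULL) < 0)
         perror ("lio_listio64");

   With LIO_WAIT the call returns only after every enqueued request has
   completed (the `pthread_cond_wait' loop above).  With LIO_NOWAIT it
   returns as soon as the requests are enqueued, and the `struct sigevent'
   argument, if non-NULL, describes how completion of the whole batch is
   reported.  */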