summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorZheng Da <zhengda1936@gmail.com>2009-11-14 00:15:08 +0100
committerZheng Da <zhengda1936@gmail.com>2009-11-14 00:15:08 +0100
commit6c25f97b8e9171eb399d56549cded82d29d05924 (patch)
treed4870807926dcba2a27fb55f705f8af1424d77b1
parentea13a76596f0a980fad58b83a6b50917d65b67c0 (diff)
A working user-level pcnet32 driver.user-level_pcnet32
-rw-r--r--pcnet32/Makefile31
-rw-r--r--pcnet32/atomic.h69
-rw-r--r--pcnet32/bitops.h201
-rw-r--r--pcnet32/dev_hdr.h157
-rw-r--r--pcnet32/device.defs204
-rw-r--r--pcnet32/device_emul.h63
-rw-r--r--pcnet32/device_reply.defs1
-rw-r--r--pcnet32/ds_routines.c1225
-rw-r--r--pcnet32/ds_routines.h55
-rw-r--r--pcnet32/if.h183
-rw-r--r--pcnet32/if_arp.h132
-rw-r--r--pcnet32/if_ether.h119
-rw-r--r--pcnet32/io_req.h135
-rw-r--r--pcnet32/irq.c34
-rw-r--r--pcnet32/irq.h27
-rw-r--r--pcnet32/kmem.c481
-rw-r--r--pcnet32/linux-types.h39
-rw-r--r--pcnet32/mach.defs779
-rw-r--r--pcnet32/main.c236
-rw-r--r--pcnet32/net.c834
-rw-r--r--pcnet32/net_init.c450
-rw-r--r--pcnet32/netdevice.h335
-rw-r--r--pcnet32/notify.defs1
-rw-r--r--pcnet32/pci.h1114
-rw-r--r--pcnet32/pcnet32.c1012
-rw-r--r--pcnet32/pcnet32.prof_d1
-rw-r--r--pcnet32/queue.c131
-rw-r--r--pcnet32/queue.h370
-rw-r--r--pcnet32/skbuff.h482
-rw-r--r--pcnet32/spl.h78
-rw-r--r--pcnet32/util.h33
-rw-r--r--pcnet32/vm_param.h7
32 files changed, 9019 insertions, 0 deletions
diff --git a/pcnet32/Makefile b/pcnet32/Makefile
new file mode 100644
index 000000000..6b91dca2d
--- /dev/null
+++ b/pcnet32/Makefile
@@ -0,0 +1,31 @@
+# Copyright (C) 2009 Free Software Foundation, Inc.
+# This file is part of the GNU Hurd.
+#
+# The GNU Hurd is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# The GNU Hurd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with the GNU Hurd; see the file COPYING. If not, write to
+# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
# Subdirectory name and build mode used by the top-level Hurd build system.
dir := pcnet32
makemode := server

# Driver sources; deviceUser.c, machUser.c, device_replyUser.c,
# deviceServer.c and notifyServer.c are MIG-generated stubs from the
# corresponding .defs files.
SRCS = pcnet32.c net_init.c deviceUser.c machUser.c irq.c net.c main.c \
	ds_routines.c queue.c device_replyUser.c deviceServer.c \
	notifyServer.c kmem.c
LCLHDRS =
HURDLIBS = threads ports fshelp shouldbeinlibc trivfs
target = pcnet32
OBJS = $(SRCS:.c=.o) $(MIGSTUBS)

include ../Makeconf

# PCI configuration space is accessed through libpciaccess.
LDFLAGS += -lpciaccess
diff --git a/pcnet32/atomic.h b/pcnet32/atomic.h
new file mode 100644
index 000000000..7e5dd06dd
--- /dev/null
+++ b/pcnet32/atomic.h
@@ -0,0 +1,69 @@
#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */

/* On SMP the "lock" prefix makes the read-modify-write atomic across
   CPUs; on UP the bare instruction suffices.  */
#ifdef __SMP__
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)

typedef int atomic_t;

/* NOTE(review): plain dereference with no volatile qualifier -- the
   compiler may cache the value across reads; confirm callers don't
   depend on re-reading memory each time.  */
#define atomic_read(v) (*v)

/* Atomically add I to *V.  */
static __inline__ void atomic_add(atomic_t i, atomic_t *v)
{
 __asm__ __volatile__(
  LOCK "addl %1,%0"
  :"=m" (__atomic_fool_gcc(v))
  :"ir" (i), "m" (__atomic_fool_gcc(v)));
}

/* Atomically subtract I from *V.  */
static __inline__ void atomic_sub(atomic_t i, atomic_t *v)
{
 __asm__ __volatile__(
  LOCK "subl %1,%0"
  :"=m" (__atomic_fool_gcc(v))
  :"ir" (i), "m" (__atomic_fool_gcc(v)));
}

/* Atomically increment *V.  */
static __inline__ void atomic_inc(atomic_t *v)
{
 __asm__ __volatile__(
  LOCK "incl %0"
  :"=m" (__atomic_fool_gcc(v))
  :"m" (__atomic_fool_gcc(v)));
}

/* Atomically decrement *V.  */
static __inline__ void atomic_dec(atomic_t *v)
{
 __asm__ __volatile__(
  LOCK "decl %0"
  :"=m" (__atomic_fool_gcc(v))
  :"m" (__atomic_fool_gcc(v)));
}

/* Atomically decrement *V; return nonzero iff the result is zero
   (sete materializes the ZF set by decl).  */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
 unsigned char c;

 __asm__ __volatile__(
  LOCK "decl %0; sete %1"
  :"=m" (__atomic_fool_gcc(v)), "=qm" (c)
  :"m" (__atomic_fool_gcc(v)));
 return c != 0;
}

#endif
diff --git a/pcnet32/bitops.h b/pcnet32/bitops.h
new file mode 100644
index 000000000..fc4cf192b
--- /dev/null
+++ b/pcnet32/bitops.h
@@ -0,0 +1,201 @@
#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#ifdef __SMP__
#define LOCK_PREFIX "lock ; "
#define SMPVOL volatile
#else
#define LOCK_PREFIX ""
#define SMPVOL
#endif

/*
 * Some hacks to defeat gcc over-optimizations..
 */
struct __dummy { unsigned long a[100]; };
#define ADDR (*(struct __dummy *) addr)
#define CONST_ADDR (*(const struct __dummy *) addr)

/* Set bit NR at ADDR; return nonzero iff it was already set.
   (btsl puts the old bit in CF; sbbl turns CF into 0 or -1.)  */
extern __inline__ int set_bit(int nr, SMPVOL void * addr)
{
 int oldbit;

 __asm__ __volatile__(LOCK_PREFIX
  "btsl %2,%1\n\tsbbl %0,%0"
  :"=r" (oldbit),"=m" (ADDR)
  :"ir" (nr));
 return oldbit;
}

/* Clear bit NR at ADDR; return nonzero iff it was previously set.  */
extern __inline__ int clear_bit(int nr, SMPVOL void * addr)
{
 int oldbit;

 __asm__ __volatile__(LOCK_PREFIX
  "btrl %2,%1\n\tsbbl %0,%0"
  :"=r" (oldbit),"=m" (ADDR)
  :"ir" (nr));
 return oldbit;
}

/* Toggle bit NR at ADDR; return nonzero iff it was previously set.  */
extern __inline__ int change_bit(int nr, SMPVOL void * addr)
{
 int oldbit;

 __asm__ __volatile__(LOCK_PREFIX
  "btcl %2,%1\n\tsbbl %0,%0"
  :"=r" (oldbit),"=m" (ADDR)
  :"ir" (nr));
 return oldbit;
}

/* Atomically set bit NR; return the old bit value.  */
extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
 int oldbit;

 __asm__ __volatile__( LOCK_PREFIX
  "btsl %2,%1\n\tsbbl %0,%0"
  :"=r" (oldbit),"=m" (ADDR)
  :"Ir" (nr));
 return oldbit;
}

/* Atomically clear bit NR; return the old bit value.  */
extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
 int oldbit;

 __asm__ __volatile__( LOCK_PREFIX
  "btrl %2,%1\n\tsbbl %0,%0"
  :"=r" (oldbit),"=m" (ADDR)
  :"Ir" (nr));
 return oldbit;
}

/* Atomically toggle bit NR; return the old bit value.  */
extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
 int oldbit;

 __asm__ __volatile__( LOCK_PREFIX
  "btcl %2,%1\n\tsbbl %0,%0"
  :"=r" (oldbit),"=m" (ADDR)
  :"Ir" (nr));
 return oldbit;
}


/*
 * This routine doesn't need to be atomic.
 */
/* Read bit NR at ADDR (word index nr>>5, bit index nr&31).  */
extern __inline__ int test_bit(int nr, const SMPVOL void * addr)
{
 return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}

/*
 * Find-bit routines..
 */
/* Return the index of the first zero bit in the SIZE-bit bitmap at
   ADDR; scans whole 32-bit words with repe/scasl, then bsfl on the
   inverted word.  Result is undefined if no zero bit exists.  */
extern __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
 int res;

 if (!size)
  return 0;
 __asm__("cld\n\t"
  "movl $-1,%%eax\n\t"
  "xorl %%edx,%%edx\n\t"
  "repe; scasl\n\t"
  "je 1f\n\t"
  "xorl -4(%%edi),%%eax\n\t"
  "subl $4,%%edi\n\t"
  "bsfl %%eax,%%edx\n"
  "1:\tsubl %%ebx,%%edi\n\t"
  "shll $3,%%edi\n\t"
  "addl %%edi,%%edx"
  :"=d" (res)
  :"c" ((size + 31) >> 5), "D" (addr), "b" (addr)
  :"ax", "cx", "di");
 return res;
}

/* Like find_first_zero_bit, but start scanning at bit OFFSET; the
   partial first word is handled in C/asm, the rest is delegated.  */
extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
 unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
 int set = 0, bit = offset & 31, res;

 if (bit) {
  /*
   * Look for zero in first byte
   */
  __asm__("bsfl %1,%0\n\t"
   "jne 1f\n\t"
   "movl $32, %0\n"
   "1:"
   : "=r" (set)
   : "r" (~(*p >> bit)));
  if (set < (32 - bit))
   return set + offset;
  set = 32 - bit;
  p++;
 }
 /*
  * No zero yet, search remaining full bytes for a zero
  */
 res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
 return (offset + set + res);
}

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
extern __inline__ unsigned long ffz(unsigned long word)
{
 __asm__("bsfl %1,%0"
  :"=r" (word)
  :"r" (~word));
 return word;
}

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

/* Returns 1-based index of the least significant set bit, 0 if X == 0.  */
extern __inline__ int ffs(int x)
{
 int r;

 __asm__("bsfl %1,%0\n\t"
  "jnz 1f\n\t"
  "movl $-1,%0\n"
  "1:" : "=r" (r) : "g" (x));
 return r+1;
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */
diff --git a/pcnet32/dev_hdr.h b/pcnet32/dev_hdr.h
new file mode 100644
index 000000000..45a997e83
--- /dev/null
+++ b/pcnet32/dev_hdr.h
@@ -0,0 +1,157 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ */
+
+/*
+ * Mach device emulation definitions (i386at version).
+ *
+ * Copyright (c) 1996 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
#ifndef _DEVICE_DEV_HDR_H_
#define _DEVICE_DEV_HDR_H_

#include <mach.h>
#include <hurd.h>
#include <hurd/ports.h>
#include <cthreads.h>

#include "device_emul.h"

/*
 * Operations list for major device types.
 */
struct dev_ops {
 char * d_name; /* name for major device */
 int (*d_open)(); /* open device */
 int (*d_close)(); /* close device */
 int (*d_read)(); /* read */
 int (*d_write)(); /* write */
 int (*d_getstat)(); /* get status/control */
 int (*d_setstat)(); /* set status/control */
 vm_offset_t (*d_mmap)(); /* map memory */
 int (*d_async_in)();/* asynchronous input setup */
 int (*d_reset)(); /* reset device */
 int (*d_port_death)();
 /* clean up reply ports */
 int d_subdev; /* number of sub-devices per
       unit */
 int (*d_dev_info)(); /* driver info for kernel */
};
typedef struct dev_ops *dev_ops_t;

/* This structure is associated with each open device port.
 * The port representing the device points to this structure. */
struct emul_device
{
 struct device_emulation_ops *emul_ops;
 void *emul_data;
};

typedef struct emul_device *emul_device_t;

#define DEVICE_NULL ((device_t) 0)

/*
 * Generic device header. May be allocated with the device,
 * or built when the device is opened.
 */
struct mach_device {
 struct port_info port; /* libports object; must stay first so the
       structure doubles as a port_info */
 struct emul_device dev; /* the real device structure */
// decl_simple_lock_data(,ref_lock)/* lock for reference count */
// int ref_count; /* reference count */
 struct mutex lock; /* protects state/flag/open_count below
       (replaces the kernel's simple lock) */
// decl_simple_lock_data(, lock) /* lock for rest of state */
 short state; /* state: */
#define DEV_STATE_INIT 0 /* not open */
#define DEV_STATE_OPENING 1 /* being opened */
#define DEV_STATE_OPEN 2 /* open */
#define DEV_STATE_CLOSING 3 /* being closed */
 short flag; /* random flags: */
#define D_EXCL_OPEN 0x0001 /* open only once */
 short open_count; /* number of times open */
 short io_in_progress; /* number of IOs in progress */
 boolean_t io_wait; /* someone waiting for IO to finish */

// struct ipc_port *port; /* open port */
// queue_chain_t number_chain; /* chain for lookup by number */
 int dev_number; /* device number */
 int bsize; /* replacement for DEV_BSIZE */
 struct dev_ops *dev_ops; /* and operations vector */
};
typedef struct mach_device *mach_device_t;
#define MACH_DEVICE_NULL ((mach_device_t)0)

/*
 * To find and remove device entries
 */
mach_device_t device_lookup(char *); /* by name */

void mach_device_reference(mach_device_t);
//void mach_device_deallocate(mach_device_t);

/*
 * To find and remove port-to-device mappings
 */
//device_t dev_port_lookup(ipc_port_t);
void dev_port_enter(mach_device_t);
void dev_port_remove(mach_device_t);

/*
 * To call a routine on each device
 */
boolean_t dev_map(boolean_t (*)(), mach_port_t);

/*
 * To lock and unlock state and open-count
 */
#define device_lock(device) mutex_lock(&(device)->lock)
#define device_unlock(device) mutex_unlock(&(device)->lock)

#endif /* _DEVICE_DEV_HDR_H_ */
diff --git a/pcnet32/device.defs b/pcnet32/device.defs
new file mode 100644
index 000000000..6a73853ad
--- /dev/null
+++ b/pcnet32/device.defs
@@ -0,0 +1,204 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: device/device.defs
+ * Author: Douglas Orr
+ * Feb 10, 1988
+ * Abstract:
+ * Mach device support. Mach devices are accessed through
+ * block and character device interfaces to the kernel.
+ */
+
#ifdef MACH_KERNEL
simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
#endif

/* NOTE(review): subsystem base 2800 must match the kernel's
   device.defs so the RPC message IDs line up -- confirm against the
   GNU Mach sources.  */
subsystem
#if KERNEL_SERVER
  KernelServer
#endif
 device 2800;

#include <mach/std_types.defs>
#include <mach/mach_types.defs>
#include <device/device_types.defs>

/* Server-side stubs are generated as ds_* (see ds_routines.c).  */
serverprefix ds_;

/* Up to four bytes of PCI configuration space.  */
type pci_config_data_t = array[*:4] of char;
type reply_port_t = MACH_MSG_TYPE_MAKE_SEND_ONCE | polymorphic
 ctype: mach_port_t;

routine device_open(
  master_port : mach_port_t;
  sreplyport reply_port : reply_port_t;
  mode : dev_mode_t;
  name : dev_name_t;
 out device : device_t
 );

routine device_close(
  device : device_t
 );

routine device_write(
  device : device_t;
  sreplyport reply_port : reply_port_t;
 in mode : dev_mode_t;
 in recnum : recnum_t;
 in data : io_buf_ptr_t;
 out bytes_written : int
 );

routine device_write_inband(
  device : device_t;
  sreplyport reply_port : reply_port_t;
 in mode : dev_mode_t;
 in recnum : recnum_t;
 in data : io_buf_ptr_inband_t;
 out bytes_written : int
 );

routine device_read(
  device : device_t;
  sreplyport reply_port : reply_port_t;
 in mode : dev_mode_t;
 in recnum : recnum_t;
 in bytes_wanted : int;
 out data : io_buf_ptr_t
 );

routine device_read_inband(
  device : device_t;
  sreplyport reply_port : reply_port_t;
 in mode : dev_mode_t;
 in recnum : recnum_t;
 in bytes_wanted : int;
 out data : io_buf_ptr_inband_t
 );

/* obsolete */
routine xxx_device_set_status(
  device : device_t;
 in flavor : dev_flavor_t;
 in status : dev_status_t, IsLong
 );

/* obsolete */
routine xxx_device_get_status(
  device : device_t;
 in flavor : dev_flavor_t;
 out status : dev_status_t, IsLong
 );

/* obsolete */
routine xxx_device_set_filter(
  device : device_t;
 in receive_port : mach_port_send_t;
 in priority : int;
 in filter : filter_array_t, IsLong
 );

routine device_map(
  device : device_t;
 in prot : vm_prot_t;
 in offset : vm_offset_t;
 in size : vm_size_t;
 out pager : memory_object_t;
 in unmap : int
 );

routine device_set_status(
  device : device_t;
 in flavor : dev_flavor_t;
 in status : dev_status_t
 );

routine device_get_status(
  device : device_t;
 in flavor : dev_flavor_t;
 out status : dev_status_t, CountInOut
 );

routine device_set_filter(
  device : device_t;
 in receive_port : mach_port_send_t;
 in priority : int;
 in filter : filter_array_t
 );

/* Register RECEIVE_PORT to be notified when interrupt IRQ fires.  */
routine device_intr_notify(
  master_port : mach_port_t;
 in irq : int;
 in id : int;
 in receive_port : mach_port_send_t
 );

/*
 * Test whether IPC devices exist.
 */
routine pci_present(
  master_port : mach_port_t);

/*
 * Find the specified PCI device.
 */
/* NOTE(review): `out bus : char` here, but the C stub ds_pci_find_device
   takes `short *bus` -- one of the two is wrong; verify against the
   generated deviceServer.c.  */
routine pci_find_device(
  master_port : mach_port_t;
  vendor : short;
  device_id : short;
  index : short;
  out bus : char;
  out device_fn : char);

/*
 * Read the configuration space of a IPC device.
 */
routine pci_read_config(
  master_port : mach_port_t;
  bus : char;
  device_fn : char;
  where : char;
  bytes_wanted : int;
  out result : pci_config_data_t);

/*
 * Write the configuration space of a IPC device.
 */
routine pci_write_config(
  master_port : mach_port_t;
  bus : char;
  device_fn : char;
  where : char;
  data : pci_config_data_t);

/*
 * enable/disable the specified irq.
 */
routine device_irq_enable(
  master_port : mach_port_t;
  irq : int;
  status : char);
new file mode 100644
index 000000000..6f561345a
--- /dev/null
+++ b/pcnet32/device_emul.h
@@ -0,0 +1,63 @@
+/*
+ * Mach device emulation definitions (i386at version).
+ *
+ * Copyright (c) 1996 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
#ifndef _I386AT_DEVICE_EMUL_H_
#define _I386AT_DEVICE_EMUL_H_

#include <mach.h>

/* Each emulation layer provides these operations. */
/* A NULL member means the emulation does not support that operation;
   callers in ds_routines.c check each pointer before invoking it.  */
struct device_emulation_ops
{
  void (*reference) (void *);
  void (*dealloc) (void *);
  mach_port_t (*dev_to_port) (void *);
  /* Open NAME; returns D_NO_SUCH_DEVICE if the name does not belong to
     this emulation so the next emulation can be tried.  */
  io_return_t (*open) (mach_port_t, mach_msg_type_name_t,
		       dev_mode_t, char *, device_t *);
  io_return_t (*close) (void *);
  io_return_t (*write) (void *, mach_port_t, mach_msg_type_name_t,
			dev_mode_t, recnum_t, io_buf_ptr_t, unsigned, int *);
  io_return_t (*write_inband) (void *, mach_port_t, mach_msg_type_name_t,
			       dev_mode_t, recnum_t, io_buf_ptr_inband_t,
			       unsigned, int *);
  io_return_t (*read) (void *, mach_port_t, mach_msg_type_name_t,
		       dev_mode_t, recnum_t, int, io_buf_ptr_t *, unsigned *);
  io_return_t (*read_inband) (void *, mach_port_t, mach_msg_type_name_t,
			      dev_mode_t, recnum_t, int, char *, unsigned *);
  io_return_t (*set_status) (void *, dev_flavor_t, dev_status_t,
			     mach_msg_type_number_t);
  io_return_t (*get_status) (void *, dev_flavor_t, dev_status_t,
			     mach_msg_type_number_t *);
  io_return_t (*set_filter) (void *, mach_port_t, int, filter_t [], unsigned);
  io_return_t (*map) (void *, vm_prot_t, vm_offset_t,
		      vm_size_t, mach_port_t *, boolean_t);
  void (*no_senders) (mach_no_senders_notification_t *);
  io_return_t (*write_trap) (void *, dev_mode_t,
			     recnum_t, vm_offset_t, vm_size_t);
  io_return_t (*writev_trap) (void *, dev_mode_t,
			      recnum_t, io_buf_vec_t *, vm_size_t);
};

#endif /* _I386AT_DEVICE_EMUL_H_ */
diff --git a/pcnet32/device_reply.defs b/pcnet32/device_reply.defs
new file mode 100644
index 000000000..699303111
--- /dev/null
+++ b/pcnet32/device_reply.defs
@@ -0,0 +1 @@
+#include <device/device_reply.defs>
diff --git a/pcnet32/ds_routines.c b/pcnet32/ds_routines.c
new file mode 100644
index 000000000..a175fc828
--- /dev/null
+++ b/pcnet32/ds_routines.c
@@ -0,0 +1,1225 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ */
+
+/*
+ * Mach device server routines (i386at version).
+ *
+ * Copyright (c) 1996 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <error.h>

#include <hurd.h>
#include <mach.h>
#include <cthreads.h>

#include "vm_param.h"
#include "device_reply_U.h"
#include "io_req.h"
#include "dev_hdr.h"
#include "util.h"
#include "queue.h"
#include "spl.h"
+
+extern struct port_bucket *port_bucket;
+extern struct port_class *dev_class;
+
+extern struct device_emulation_ops linux_net_emulation_ops;
+extern struct device_emulation_ops mach_device_emulation_ops;
+
#define NUM_EMULATION (sizeof (emulation_list) / sizeof (emulation_list[0]))

/* List of emulations. */
/* Tried in order by ds_device_open; the first whose open routine does
   not return D_NO_SUCH_DEVICE wins.  */
static struct device_emulation_ops *emulation_list[] =
{
  &linux_net_emulation_ops,
  &mach_device_emulation_ops,
};
+
+extern kern_return_t device_intr_notify (mach_port_t master_port,
+ int irq, int id,
+ mach_port_t receive_port,
+ mach_msg_type_name_t receive_portPoly);
+
/* Look up a device by name.  Currently a stub that never finds a
   device; the commented-out code shows the intended single-device
   (kbd) lookup.  */
mach_device_t
device_lookup (char *name)
{
// if (strcmp (kbd_device->dev_ops->d_name, name) == 0)
// {
// mach_device_reference (kbd_device);
// return kbd_device;
// }
  return NULL;
}
+
/* Drop a reference on DEVICE (a struct mach_device passed as its
   port_info pointer).  */
void
mach_device_deallocate (void *device)
{
  ports_port_deref (device);
}
+
/* Take an additional reference on DEVICE.  */
void
mach_device_reference (mach_device_t device)
{
  ports_port_ref (device);
}
+
/* Translate a device port into the embedded emul_device structure.
   Returns NULL if the port is not a live device port in our bucket.
   On success the lookup adds a port reference; callers drop it with
   ports_port_deref (device_to_pi (...)).  */
emul_device_t
mach_convert_port_to_device (device_t device)
{
  mach_device_t dev = ports_lookup_port (port_bucket, device, dev_class);
  if (dev == NULL)
    return NULL;

  return &dev->dev;
}
+
+void *
+device_to_pi (emul_device_t device)
+{
+ return ((void *) device) - (int) &((mach_device_t) 0)->dev;
+}
+
+/*
+ * What follows is the interface for the native Mach devices.
+ */
+
/* Return a send right for DEVICE's port, or MACH_PORT_NULL if DEVICE
   is NULL.  */
mach_port_t
mach_convert_device_to_port (mach_device_t device)
{
  if (device == NULL)
    return MACH_PORT_NULL;

  // TODO I have to somehow dereference it when it is called at the first time.
  return ports_get_right (device);
}
+
/* Implementation of device interface */

/* The RPC stubs below are not implemented by this user-level driver;
   each simply refuses the operation with D_INVALID_OPERATION.  */

kern_return_t
ds_xxx_device_set_status (device_t device, dev_flavor_t flavor,
			  dev_status_t status, size_t statu_cnt)
{
  return D_INVALID_OPERATION;
}

kern_return_t
ds_xxx_device_get_status (device_t device, dev_flavor_t flavor,
			  dev_status_t status, size_t *statuscnt)
{
  return D_INVALID_OPERATION;
}

kern_return_t
ds_xxx_device_set_filter (device_t device, mach_port_t rec,
			  int pri, filter_array_t filt, size_t len)
{
  return D_INVALID_OPERATION;
}

io_return_t
ds_device_intr_notify (mach_port_t master_port, int irq,
		       int id, mach_port_t receive_port)
{
  return D_INVALID_OPERATION;
}

kern_return_t
ds_pci_write_config (mach_port_t master_port, char bus, char device_fn,
		     char where, pci_config_data_t data,
		     mach_msg_type_number_t dataCnt)
{
  return D_INVALID_OPERATION;
}

kern_return_t
ds_pci_read_config (mach_port_t master_port, char bus, char device_fn,
		    char where, int bytes_wanted, pci_config_data_t result,
		    mach_msg_type_number_t *resultCnt)
{
  return D_INVALID_OPERATION;
}

/* NOTE(review): `short *bus` here but device.defs declares
   `out bus : char` -- one of the two is wrong; verify against the
   MIG-generated deviceServer.c.  */
kern_return_t
ds_pci_find_device (mach_port_t master_port, short vendor, short device_id,
		    short index, short *bus, char *device_fn)
{
  return D_INVALID_OPERATION;
}

kern_return_t
ds_pci_present (mach_port_t master_port)
{
  return D_INVALID_OPERATION;
}

kern_return_t
ds_device_irq_enable (mach_port_t master_port,
		      int irq, char status)
{
  return D_INVALID_OPERATION;
}
+
+io_return_t
+ds_device_open (mach_port_t open_port, mach_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ char *name, device_t *devp)
+{
+ int i;
+ io_return_t err;
+ extern boolean_t is_master_device (mach_port_t port);
+
+ /* Open must be called on the master device port. */
+ if (!is_master_device (open_port))
+ return D_INVALID_OPERATION;
+
+ /* There must be a reply port. */
+ if (! MACH_PORT_VALID (reply_port))
+ {
+ fprintf (stderr, "ds_* invalid reply port\n");
+ return MIG_NO_REPLY;
+ }
+
+ /* Call each emulation's open routine to find the device. */
+ for (i = 0; i < NUM_EMULATION; i++)
+ {
+ err = (*emulation_list[i]->open) (reply_port, reply_port_type,
+ mode, name, devp);
+ if (err != D_NO_SUCH_DEVICE)
+ break;
+ }
+
+ return err;
+}
+
+io_return_t
+ds_device_close (device_t dev)
+{
+ emul_device_t device;
+ io_return_t ret;
+
+ /* Refuse if device is dead or not completely open. */
+ if (dev == MACH_PORT_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ device = mach_convert_port_to_device (dev);
+ ret = (device->emul_ops->close
+ ? (*device->emul_ops->close) (device->emul_data)
+ : D_SUCCESS);
+ mach_device_deallocate (device_to_pi (device));
+
+ ports_port_deref (device_to_pi (device));
+ return ret;
+}
+
+io_return_t
+ds_device_write (device_t dev, mach_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t recnum, io_buf_ptr_t data, unsigned int count,
+ int *bytes_written)
+{
+ emul_device_t device;
+ io_return_t ret;
+
+ /* Refuse if device is dead or not completely open. */
+ if (dev == MACH_PORT_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ if (data == 0)
+ return D_INVALID_SIZE;
+
+ device = mach_convert_port_to_device (dev);
+
+ if (! device->emul_ops->write)
+ return D_INVALID_OPERATION;
+
+ ret = (*device->emul_ops->write) (device->emul_data, reply_port,
+ reply_port_type, mode, recnum,
+ data, count, bytes_written);
+ ports_port_deref (device_to_pi (device));
+
+ return ret;
+}
+
+io_return_t
+ds_device_write_inband (device_t dev, mach_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode, recnum_t recnum,
+ io_buf_ptr_inband_t data, unsigned count,
+ int *bytes_written)
+{
+ emul_device_t device;
+ io_return_t ret;
+
+ /* Refuse if device is dead or not completely open. */
+ if (dev == MACH_PORT_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ if (data == 0)
+ return D_INVALID_SIZE;
+
+ device = mach_convert_port_to_device (dev);
+
+ if (! device->emul_ops->write_inband)
+ return D_INVALID_OPERATION;
+
+ ret = (*device->emul_ops->write_inband) (device->emul_data, reply_port,
+ reply_port_type, mode, recnum,
+ data, count, bytes_written);
+ ports_port_deref (device_to_pi (device));
+
+ return ret;
+}
+
+io_return_t
+ds_device_read (device_t dev, mach_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t recnum, int count, io_buf_ptr_t *data,
+ unsigned *bytes_read)
+{
+ emul_device_t device;
+ io_return_t ret;
+
+ /* Refuse if device is dead or not completely open. */
+ if (dev == MACH_PORT_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ device = mach_convert_port_to_device (dev);
+
+ if (! device->emul_ops->read)
+ return D_INVALID_OPERATION;
+
+ ret = (*device->emul_ops->read) (device->emul_data, reply_port,
+ reply_port_type, mode, recnum,
+ count, data, bytes_read);
+ ports_port_deref (device_to_pi (device));
+ return ret;
+}
+
+/* Server-side stub for device_read_inband: like ds_device_read but
+ the result is returned inband (copied into the reply message)
+ rather than as out-of-line memory. */
+io_return_t
+ds_device_read_inband (device_t dev, mach_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t recnum, int count, char *data,
+ unsigned *bytes_read)
+{
+ emul_device_t device;
+ io_return_t ret;
+
+ /* Refuse if device is dead or not completely open. */
+ if (dev == MACH_PORT_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ device = mach_convert_port_to_device (dev);
+
+ /* NOTE(review): early return bypasses ports_port_deref -- possible
+ reference leak, see ds_device_read. */
+ if (! device->emul_ops->read_inband)
+ return D_INVALID_OPERATION;
+
+ ret = (*device->emul_ops->read_inband) (device->emul_data, reply_port,
+ reply_port_type, mode, recnum,
+ count, data, bytes_read);
+ ports_port_deref (device_to_pi (device));
+ return ret;
+}
+
+/* Server-side stub for device_set_status: forward a flavor-specific
+ status-setting request (ioctl-like) to the emulation ops. */
+io_return_t
+ds_device_set_status (device_t dev, dev_flavor_t flavor,
+ dev_status_t status, mach_msg_type_number_t status_count)
+{
+ emul_device_t device;
+ io_return_t ret;
+
+ /* Refuse if device is dead or not completely open. */
+ if (dev == MACH_PORT_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ device = mach_convert_port_to_device (dev);
+
+ /* NOTE(review): early return bypasses ports_port_deref -- possible
+ reference leak, see ds_device_read. */
+ if (! device->emul_ops->set_status)
+ return D_INVALID_OPERATION;
+
+ ret = (*device->emul_ops->set_status) (device->emul_data, flavor,
+ status, status_count);
+ ports_port_deref (device_to_pi (device));
+ return ret;
+}
+
+/* Server-side stub for device_get_status: forward a flavor-specific
+ status query to the emulation ops; *status_count is in/out (caller
+ supplies buffer capacity, callee returns words filled). */
+io_return_t
+ds_device_get_status (device_t dev, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t *status_count)
+{
+ emul_device_t device;
+ io_return_t ret;
+
+ /* Refuse if device is dead or not completely open. */
+ if (dev == MACH_PORT_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ device = mach_convert_port_to_device (dev);
+
+ /* NOTE(review): early return bypasses ports_port_deref -- possible
+ reference leak, see ds_device_read. */
+ if (! device->emul_ops->get_status)
+ return D_INVALID_OPERATION;
+
+ ret = (*device->emul_ops->get_status) (device->emul_data, flavor,
+ status, status_count);
+ ports_port_deref (device_to_pi (device));
+ return ret;
+}
+
+/* Server-side stub for device_set_filter: install a packet filter on
+ the device; matching packets are delivered to receive_port. Used
+ by network clients (e.g. pfinet) to select frames of interest. */
+io_return_t
+ds_device_set_filter (device_t dev, mach_port_t receive_port, int priority,
+ filter_t *filter, unsigned filter_count)
+{
+ emul_device_t device;
+ io_return_t ret;
+
+ /* Refuse if device is dead or not completely open. */
+ if (dev == MACH_PORT_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ device = mach_convert_port_to_device (dev);
+
+ /* NOTE(review): early return bypasses ports_port_deref -- possible
+ reference leak, see ds_device_read. */
+ if (! device->emul_ops->set_filter)
+ return D_INVALID_OPERATION;
+
+ ret = (*device->emul_ops->set_filter) (device->emul_data, receive_port,
+ priority, filter, filter_count);
+ ports_port_deref (device_to_pi (device));
+ return ret;
+}
+
+/* Server-side stub for device_map: memory-mapped device access is not
+ supported by this user-level driver, so any valid port gets
+ D_INVALID_OPERATION. */
+io_return_t
+ds_device_map (device_t dev, vm_prot_t prot, vm_offset_t offset,
+ vm_size_t size, mach_port_t *pager, boolean_t unmap)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == MACH_PORT_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ return D_INVALID_OPERATION;
+}
+
+/* IO-completion callback for device_open (ior->io_done). Finalizes
+ the device state under the device lock, sends the open reply on the
+ recorded reply port, and drops the reference the ior held on the
+ device. Returns TRUE so the io_done thread frees the ior. */
+boolean_t
+ds_open_done(ior)
+ register io_req_t ior;
+{
+ kern_return_t result;
+ register mach_device_t device;
+
+ device = ior->io_device;
+ result = ior->io_error;
+
+ if (result != D_SUCCESS) {
+ /*
+ * Open failed. Deallocate port and device.
+ */
+// dev_port_remove(device);
+// ipc_port_dealloc_kernel(device->port);
+
+ device_lock(device);
+ device->state = DEV_STATE_INIT;
+ if (device->io_wait) {
+ device->io_wait = FALSE;
+// thread_wakeup((event_t)device);
+ }
+ device_unlock(device);
+
+// mach_device_deallocate(device);
+ /* Clear the local so the reply below carries a null port. */
+ device = MACH_DEVICE_NULL;
+ }
+ else {
+ /*
+ * Open succeeded.
+ */
+ device_lock(device);
+ device->state = DEV_STATE_OPEN;
+ device->open_count = 1;
+ if (device->io_wait) {
+ device->io_wait = FALSE;
+// thread_wakeup((event_t)device);
+ }
+ device_unlock(device);
+
+ /* donate device reference to get port */
+ }
+ /*
+ * Must explicitly convert device to port, since
+ * device_reply interface is built as 'user' side
+ * (thus cannot get translation).
+ */
+ if (MACH_PORT_VALID(ior->io_reply_port)) {
+ (void) ds_device_open_reply(ior->io_reply_port,
+ ior->io_reply_port_type,
+ result,
+ mach_convert_device_to_port(device));
+ }
+// else
+// mach_device_deallocate(device);
+
+ /* NOTE(review): on the failure path device is MACH_DEVICE_NULL
+ here, so mach_convert_device_to_port and mach_device_deallocate
+ must both tolerate a null device -- confirm. On failure the
+ reference taken at open is also never dropped on a real device;
+ verify against the commented-out deallocate above. */
+ mach_device_deallocate (device);
+ return (TRUE);
+}
+
+/* Emulation-ops open handler: look up the named device, serialize
+ against concurrent open/close, and drive the driver's d_open
+ through an io_req. The reply is always sent via ds_open_done (or
+ later, by the io_done thread if the driver queued the request), so
+ this function itself returns MIG_NO_REPLY on every successful
+ dispatch path. */
+static io_return_t
+device_open(reply_port, reply_port_type, mode, name, device_p)
+ mach_port_t reply_port;
+ mach_msg_type_name_t reply_port_type;
+ dev_mode_t mode;
+ char * name;
+ device_t *device_p; /* out */
+{
+ register mach_device_t device;
+ register kern_return_t result;
+ register io_req_t ior;
+
+ /* device_lookup presumably returns a referenced device. */
+ device = device_lookup (name);
+ if (device == NULL)
+ return D_NO_SUCH_DEVICE;
+
+ /*
+ * If the device is being opened or closed,
+ * wait for that operation to finish.
+ */
+ device_lock(device);
+ while (device->state == DEV_STATE_OPENING ||
+ device->state == DEV_STATE_CLOSING) {
+// device->io_wait = TRUE;
+// thread_sleep((event_t)device, simple_lock_addr(device->lock), TRUE);
+// device_lock(device);
+ /* User-level port of the kernel code: instead of sleeping on
+ the event (commented out above), fail the racing open
+ outright. Callers see D_INVALID_OPERATION and may retry. */
+ device_unlock (device);
+ mach_device_deallocate (device);
+ return D_INVALID_OPERATION;
+ }
+
+ /*
+ * If the device is already open, increment the open count
+ * and return.
+ */
+ if (device->state == DEV_STATE_OPEN) {
+
+ if (device->flag & D_EXCL_OPEN) {
+ /*
+ * Cannot open a second time.
+ */
+ device_unlock(device);
+ mach_device_deallocate(device);
+ return (D_ALREADY_OPEN);
+ }
+
+ device->open_count++;
+ device_unlock(device);
+// TODO I have to dereference it at the first time.
+ /* Hand the caller a send right to the existing device port.
+ NOTE(review): unlike the other exits, this path does not
+ drop the lookup reference -- see the TODO above; confirm
+ whether ports_get_send_right consumes it. */
+ *device_p = ports_get_send_right (device);
+ return (D_SUCCESS);
+ /*
+ * Return deallocates device reference while acquiring
+ * port.
+ */
+ }
+
+ /*
+ * Allocate the device port and register the device before
+ * opening it.
+ */
+ device->state = DEV_STATE_OPENING;
+ device_unlock(device);
+
+// dev_port_enter(device);
+
+ /*
+ * Open the device.
+ */
+ io_req_alloc(ior, 0);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_OPEN | IO_CALL;
+ ior->io_mode = mode;
+ ior->io_error = 0;
+ ior->io_done = ds_open_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ /* Extra reference held by the ior; ds_open_done drops it. */
+ mach_device_reference (device);
+
+ result = (*device->dev_ops->d_open)(device->dev_number, (int)mode, ior);
+ if (result == D_IO_QUEUED)
+ /* Driver completes asynchronously; iodone() will invoke
+ ds_open_done and send the reply. */
+ return (MIG_NO_REPLY);
+
+ /*
+ * Return result via ds_open_done.
+ */
+ ior->io_error = result;
+ (void) ds_open_done(ior);
+
+ io_req_free(ior);
+
+ return (MIG_NO_REPLY); /* reply already sent */
+}
+
+
+/* Emulation-ops close handler: decrement the open count and, when it
+ reaches zero, transition OPEN -> CLOSING -> INIT around the
+ driver's d_close. Always returns D_SUCCESS. */
+static io_return_t
+device_close(device)
+ register mach_device_t device;
+{
+ device_lock(device);
+
+ /*
+ * If device will remain open, do nothing.
+ */
+ if (--device->open_count > 0) {
+ device_unlock(device);
+ return (D_SUCCESS);
+ }
+
+ /*
+ * If device is being closed, do nothing.
+ */
+ if (device->state == DEV_STATE_CLOSING) {
+ device_unlock(device);
+ return (D_SUCCESS);
+ }
+
+ /*
+ * Mark device as closing, to prevent new IO.
+ * Outstanding IO will still be in progress.
+ */
+ device->state = DEV_STATE_CLOSING;
+ device_unlock(device);
+
+ /*
+ * ? wait for IO to end ?
+ * only if device wants to
+ */
+
+ /*
+ * Remove the device-port association.
+ */
+// dev_port_remove(device);
+// ipc_port_dealloc_kernel(device->port);
+
+ /*
+ * Close the device
+ */
+ (*device->dev_ops->d_close)(device->dev_number);
+
+ /*
+ * Finally mark it closed. If someone else is trying
+ * to open it, the open can now proceed.
+ */
+ device_lock(device);
+ device->state = DEV_STATE_INIT;
+ if (device->io_wait) {
+ device->io_wait = FALSE;
+// thread_wakeup((event_t)device);
+ }
+ device_unlock(device);
+
+ return (D_SUCCESS);
+}
+
+/* IO-completion callback for device_read/device_read_inband
+ (ior->io_done). Zero-fills the unread tail of the buffer, dirties
+ the pages, sends the read reply (inband or out-of-line), frees any
+ over-allocation, and drops the ior's device reference. Returns
+ TRUE so the caller frees the ior. */
+boolean_t ds_read_done(ior)
+ io_req_t ior;
+{
+ vm_offset_t start_data, end_data;
+ vm_offset_t start_sent, end_sent;
+ register vm_size_t size_read;
+
+ /* On error nothing was read; otherwise the driver leaves the
+ unread remainder in io_residual. */
+ if (ior->io_error)
+ size_read = 0;
+ else
+ size_read = ior->io_count - ior->io_residual;
+
+ start_data = (vm_offset_t)ior->io_data;
+ end_data = start_data + size_read;
+
+ /* Out-of-line replies send whole pages; inband replies send the
+ exact allocation. */
+ start_sent = (ior->io_op & IO_INBAND) ? start_data :
+ trunc_page(start_data);
+ end_sent = (ior->io_op & IO_INBAND) ?
+ start_data + ior->io_alloc_size : round_page(end_data);
+
+ /*
+ * Zero memory that the device did not fill.
+ */
+ if (start_sent < start_data)
+ memset((char *)start_sent, 0, start_data - start_sent);
+ if (end_sent > end_data)
+ memset((char *)end_data, 0, end_sent - end_data);
+
+
+ /*
+ * Touch the data being returned, to mark it dirty.
+ * If the pages were filled by DMA, the pmap module
+ * may think that they are clean.
+ */
+ {
+ register vm_offset_t touch;
+ register int c;
+
+ /* Read-modify-write one byte per page; volatile prevents the
+ compiler from eliding the stores. */
+ for (touch = start_sent; touch < end_sent; touch += PAGE_SIZE) {
+ c = *(volatile char *)touch;
+ *(volatile char *)touch = c;
+ }
+ }
+
+ /*
+ * Send the data to the reply port - this
+ * unwires and deallocates it.
+ */
+ if (ior->io_op & IO_INBAND) {
+ (void)ds_device_read_reply_inband(ior->io_reply_port,
+ ior->io_reply_port_type,
+ ior->io_error,
+ (char *) start_data,
+ size_read);
+ } else {
+// vm_map_copy_t copy;
+// kern_return_t kr;
+//
+// kr = vm_map_copyin_page_list(kernel_map, start_data,
+// size_read, TRUE, TRUE,
+// &copy, FALSE);
+//
+// if (kr != KERN_SUCCESS)
+// panic("read_done: vm_map_copyin_page_list failed");
+
+ /* User-level port: the MIG reply moves the memory out-of-line
+ instead of the kernel page-list copy above. */
+ (void)ds_device_read_reply(ior->io_reply_port,
+ ior->io_reply_port_type,
+ ior->io_error,
+ (char *) start_data,
+ size_read);
+ }
+
+ /*
+ * Free any memory that was allocated but not sent.
+ */
+ if (ior->io_count != 0) {
+ if (ior->io_op & IO_INBAND) {
+ if (ior->io_alloc_size > 0)
+ free (ior->io_data);
+// zfree(io_inband_zone, (vm_offset_t)ior->io_data);
+ } else {
+ register vm_offset_t end_alloc;
+
+ /* Deallocate the pages past what the reply transferred. */
+ end_alloc = start_sent + round_page(ior->io_alloc_size);
+ if (end_alloc > end_sent)
+ vm_deallocate(mach_task_self (),
+ end_sent,
+ end_alloc - end_sent);
+ }
+ }
+
+ /* Drop the reference taken when the read was queued. */
+ mach_device_deallocate(ior->io_device);
+
+ return (TRUE);
+}
+
+/*
+ * Read from a device.
+ */
+/*
+ * Read from a device.
+ *
+ * Emulation-ops read handler: package the request as an io_req and
+ * hand it to the driver's d_read. The data buffer is allocated by
+ * the driver (via device_read_alloc) and the reply is always sent
+ * through ds_read_done, hence the MIG_NO_REPLY returns.
+ */
+static io_return_t
+device_read(device, reply_port, reply_port_type, mode, recnum,
+ bytes_wanted, data, data_count)
+ mach_device_t device;
+ mach_port_t reply_port;
+ mach_msg_type_name_t reply_port_type;
+ dev_mode_t mode;
+ recnum_t recnum;
+ int bytes_wanted;
+ io_buf_ptr_t *data; /* out */
+ unsigned int *data_count; /* out */
+{
+ register io_req_t ior;
+ register io_return_t result;
+
+#ifdef lint
+ *data = *data;
+ *data_count = *data_count;
+#endif /* lint */
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * There must be a reply port.
+ */
+ if (!MACH_PORT_VALID(reply_port)) {
+ printf("ds_* invalid reply port\n");
+ return (MIG_NO_REPLY); /* no sense in doing anything */
+ }
+
+ /*
+ * Package the read request for the device driver
+ */
+ io_req_alloc(ior, 0);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_READ | IO_CALL;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = 0; /* driver must allocate data */
+ ior->io_count = bytes_wanted;
+ ior->io_alloc_size = 0; /* no data allocated yet */
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_read_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * And do the read.
+ */
+ result = (*device->dev_ops->d_read)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Return result via ds_read_done.
+ */
+ ior->io_error = result;
+ (void) ds_read_done(ior);
+ io_req_free(ior);
+
+ return (MIG_NO_REPLY); /* reply has already been sent. */
+}
+
+/*
+ * Read from a device, but return the data 'inband.'
+ */
+/*
+ * Read from a device, but return the data 'inband.'
+ *
+ * Same flow as device_read, except IO_INBAND is set and the request
+ * size is clamped to sizeof(io_buf_ptr_inband_t), the maximum an
+ * inband reply message can carry.
+ */
+static io_return_t
+device_read_inband(device, reply_port, reply_port_type, mode, recnum,
+ bytes_wanted, data, data_count)
+ mach_device_t device;
+ mach_port_t reply_port;
+ mach_msg_type_name_t reply_port_type;
+ dev_mode_t mode;
+ recnum_t recnum;
+ int bytes_wanted;
+ char *data; /* pointer to OUT array */
+ unsigned int *data_count; /* out */
+{
+ register io_req_t ior;
+ register io_return_t result;
+
+#ifdef lint
+ *data = *data;
+ *data_count = *data_count;
+#endif /* lint */
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * There must be a reply port.
+ */
+ if (!MACH_PORT_VALID(reply_port)) {
+ printf("ds_* invalid reply port\n");
+ return (MIG_NO_REPLY); /* no sense in doing anything */
+ }
+
+ /*
+ * Package the read for the device driver
+ */
+ io_req_alloc(ior, 0);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_READ | IO_CALL | IO_INBAND;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = 0; /* driver must allocate data */
+ /* Clamp to the inband-buffer capacity. */
+ ior->io_count =
+ ((bytes_wanted < sizeof(io_buf_ptr_inband_t)) ?
+ bytes_wanted : sizeof(io_buf_ptr_inband_t));
+ ior->io_alloc_size = 0; /* no data allocated yet */
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_read_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * Do the read.
+ */
+ result = (*device->dev_ops->d_read)(device->dev_number, ior);
+
+ /*
+ * If the io was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Return result, via ds_read_done.
+ */
+ ior->io_error = result;
+ (void) ds_read_done(ior);
+ io_req_free(ior);
+
+ return (MIG_NO_REPLY); /* reply has already been sent. */
+}
+
+/* Emulation-ops set_status handler: thin synchronous forward to the
+ driver's d_setstat; no io_req needed. */
+static io_return_t
+device_set_status(device, flavor, status, status_count)
+ mach_device_t device;
+ dev_flavor_t flavor;
+ dev_status_t status;
+ mach_msg_type_number_t status_count;
+{
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ return ((*device->dev_ops->d_setstat)(device->dev_number,
+ flavor,
+ status,
+ status_count));
+}
+
+/* Emulation-ops get_status handler: thin synchronous forward to the
+ driver's d_getstat; *status_count is in/out. */
+static io_return_t
+device_get_status(device, flavor, status, status_count)
+ mach_device_t device;
+ dev_flavor_t flavor;
+ dev_status_t status; /* pointer to OUT array */
+ mach_msg_type_number_t *status_count; /* out */
+{
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ return ((*device->dev_ops->d_getstat)(device->dev_number,
+ flavor,
+ status,
+ status_count));
+}
+
+/*
+ * Allocate wired-down memory for device read.
+ */
+/*
+ * Allocate wired-down memory for device read.
+ *
+ * Called by the driver before filling a read request: inband reads
+ * get a malloc'd buffer of fixed inband size, out-of-line reads get
+ * page-rounded anonymous memory from vm_allocate. Sets io_data and
+ * io_alloc_size on success.
+ *
+ * NOTE(review): the malloc return on the inband path is unchecked --
+ * a NULL here would crash in the driver; the "wired-down" in the
+ * comment is inherited from the kernel version, vm_allocate memory
+ * in a user task is not actually wired.
+ */
+kern_return_t device_read_alloc(ior, size)
+ register io_req_t ior;
+ register vm_size_t size;
+{
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ /*
+ * Nothing to do if no data.
+ */
+ if (ior->io_count == 0)
+ return (KERN_SUCCESS);
+
+ if (ior->io_op & IO_INBAND) {
+ ior->io_data = (io_buf_ptr_t) malloc(sizeof(io_buf_ptr_inband_t));
+ ior->io_alloc_size = sizeof(io_buf_ptr_inband_t);
+ } else {
+ size = round_page(size);
+ kr = vm_allocate (mach_task_self (), &addr, size, TRUE);
+// kr = kmem_alloc(kernel_map, &addr, size);
+ if (kr != KERN_SUCCESS)
+ return (kr);
+
+ ior->io_data = (io_buf_ptr_t) addr;
+ ior->io_alloc_size = size;
+ }
+
+ return (KERN_SUCCESS);
+}
+
+/* Condition-variable-based stand-in for the kernel's
+ thread_sleep/thread_wakeup: v is the "should block" flag guarded
+ by mutex. */
+struct thread_wait
+{
+ struct condition cond;
+ struct mutex mutex;
+ int v;
+};
+
+/* Wait object the io_done thread blocks on between work batches. */
+static struct thread_wait io_done_wait;
+
+/* Initialize a thread_wait: unlocked mutex, no waiters, not blocked. */
+void thread_wait_init (struct thread_wait *t)
+{
+ mutex_init (&t->mutex);
+ condition_init (&t->cond);
+ t->v = 0;
+}
+
+/* Block the calling thread until thread_wakeup clears t->v.
+ NOTE(review): this shadows Mach's kernel thread_block(); fine in a
+ user task but a confusing name. Setting v = 1 here means a wakeup
+ issued before this call is not remembered -- callers must tolerate
+ that (io_done_thread_continue re-scans its queue each iteration). */
+void thread_block (struct thread_wait *t)
+{
+ mutex_lock (&t->mutex);
+ t->v = 1;
+ while (t->v)
+ hurd_condition_wait (&t->cond, &t->mutex);
+ mutex_unlock (&t->mutex);
+}
+
+/* Release a thread blocked in thread_block on t (clears the flag and
+ signals under the mutex, so the wakeup cannot be lost once the
+ waiter is inside hurd_condition_wait). */
+void thread_wakeup (struct thread_wait *t)
+{
+ mutex_lock (&t->mutex);
+ t->v = 0;
+ condition_signal (&t->cond);
+ mutex_unlock (&t->mutex);
+}
+
+/* Completed io_reqs awaiting their io_done callback, consumed by the
+ io_done thread; guarded by io_done_list_lock. */
+queue_head_t io_done_list;
+struct mutex io_done_list_lock;
+
+#define splio splsched /* XXX must block ALL io devices */
+
+/* Mark an io_req complete. Loaned iors are finished synchronously;
+ IO_CALL iors are queued for the io_done thread; otherwise a waiter
+ in iowait() is flagged via IO_DONE/IO_WANTED. May be called from
+ interrupt context -- hence the splio bracket. */
+void iodone(ior)
+ register io_req_t ior;
+{
+ register spl_t s;
+
+ /*
+ * If this ior was loaned to us, return it directly.
+ */
+ if (ior->io_op & IO_LOANED) {
+ (*ior->io_done)(ior);
+ return;
+ }
+ /*
+ * If !IO_CALL, some thread is waiting for this. Must lock
+ * structure to interlock correctly with iowait(). Else can
+ * toss on queue for io_done thread to call completion.
+ */
+ s = splio();
+ if ((ior->io_op & IO_CALL) == 0) {
+ ior_lock(ior);
+ ior->io_op |= IO_DONE;
+ ior->io_op &= ~IO_WANTED;
+ ior_unlock(ior);
+// thread_wakeup((event_t)ior);
+ } else {
+ ior->io_op |= IO_DONE;
+ mutex_lock (&io_done_list_lock);
+ enqueue_tail(&io_done_list, (queue_entry_t)ior);
+ /* Kick the io_done thread (local thread_wakeup, not Mach's). */
+ thread_wakeup (&io_done_wait);
+// thread_wakeup((event_t)&io_done_list);
+ mutex_unlock (&io_done_list_lock);
+ }
+ splx(s);
+}
+
+/* External hook (e.g. for the skbuff reaper) to prod the io_done
+ thread into running a scan even with no newly queued ior. */
+void wakeup_io_done_thread ()
+{
+ thread_wakeup (&io_done_wait);
+}
+
+/* Body of the io_done thread: loop forever reaping freed skbuffs,
+ draining io_done_list (calling each ior's io_done callback and
+ freeing the ior when the callback returns TRUE), then blocking on
+ io_done_wait until iodone()/wakeup_io_done_thread signals. The
+ list lock is dropped around each callback so completions can queue
+ more work. */
+void io_done_thread_continue()
+{
+ for (;;) {
+ extern void free_skbuffs ();
+ register spl_t s;
+ register io_req_t ior;
+
+ free_skbuffs ();
+ s = splio();
+ mutex_lock(&io_done_list_lock);
+ while ((ior = (io_req_t)dequeue_head(&io_done_list)) != 0) {
+ mutex_unlock(&io_done_list_lock);
+ splx(s);
+
+ if ((*ior->io_done)(ior)) {
+ /*
+ * IO done - free io_req_elt
+ */
+ io_req_free(ior);
+ }
+ /* else routine has re-queued it somewhere */
+
+ s = splio();
+ mutex_lock(&io_done_list_lock);
+ }
+
+// assert_wait(&io_done_list, FALSE);
+ mutex_unlock(&io_done_list_lock);
+ splx(s);
+// counter(c_io_done_thread_block++);
+// thread_block(io_done_thread_continue);
+ /* Local condition-variable block, not the kernel primitive. */
+ thread_block (&io_done_wait);
+ }
+}
+
+
+/* Wire the calling thread into physical memory via the privileged
+ host port, so the driver's IO path cannot page-fault. Panics on
+ failure -- the driver cannot run safely unwired. */
+void
+wire_thread()
+{
+ kern_return_t kr;
+ mach_port_t priv_host_port;
+
+ kr = get_privileged_ports (&priv_host_port, NULL);
+ if (kr != KERN_SUCCESS)
+ panic("get privileged port: %d", kr);
+
+ kr = thread_wire(priv_host_port,
+ mach_thread_self(),
+ TRUE);
+ if (kr != KERN_SUCCESS)
+ panic("wire_thread: %d", kr);
+}
+
+/* Raise the calling thread's scheduling priority using the privileged
+ host port and its processor-set control port. Panics on any
+ failure.
+ NOTE(review): the `priority' parameter is ignored -- both
+ thread_max_priority and thread_priority are called with a
+ hard-coded 0 (highest priority in Mach); confirm whether the
+ parameter was meant to be passed through. */
+void
+thread_set_own_priority (int priority)
+{
+ kern_return_t kr;
+ mach_port_t priv_host_port;
+ mach_port_t pset, psetcntl;
+
+ kr = get_privileged_ports (&priv_host_port, NULL);
+ if (kr != KERN_SUCCESS)
+ panic("get privileged port: %d", kr);
+
+ kr = thread_get_assignment (mach_thread_self (), &pset);
+ if (kr != KERN_SUCCESS)
+ panic("thread get assignment: %d", kr);
+ kr = host_processor_set_priv (priv_host_port, pset, &psetcntl);
+ if (kr != KERN_SUCCESS)
+ panic("processor set priv: %d", kr);
+ kr = thread_max_priority (mach_thread_self (), psetcntl, 0);
+ if (kr != KERN_SUCCESS)
+ panic("set thread max priority: %d", kr);
+ kr = thread_priority (mach_thread_self (), 0, FALSE);
+ if (kr != KERN_SUCCESS)
+ panic("set thread priority: %d", kr);
+}
+
+/* Thread entry point for the io_done thread: wire it, set top
+ priority, then run the completion loop forever. */
+any_t io_done_thread(any_t unused)
+{
+ /*
+ * Set thread privileges and highest priority.
+ */
+// current_thread()->vm_privilege = TRUE;
+// stack_privilege(current_thread());
+ wire_thread ();
+
+ thread_set_own_priority(0);
+
+ io_done_thread_continue();
+ /*NOTREACHED*/
+ return 0;
+}
+
+/* One-time init of the device-service machinery: the completion
+ queue, its lock, and the io_done thread's wait object. The kernel
+ version's device_io_map suballocation and inband zone are not
+ needed in this user-level port and remain commented out. */
+void mach_device_init()
+{
+// vm_offset_t device_io_min, device_io_max;
+
+ queue_init(&io_done_list);
+ mutex_init (&io_done_list_lock);
+ thread_wait_init (&io_done_wait);
+
+// device_io_map = kmem_suballoc(kernel_map,
+// &device_io_min,
+// &device_io_max,
+// DEVICE_IO_MAP_SIZE,
+// FALSE);
+ /*
+ * If the kernel receives many device_write requests, the
+ * device_io_map might run out of space. To prevent
+ * device_write_get from failing in this case, we enable
+ * wait_for_space on the map. This causes kmem_io_map_copyout
+ * to block until there is sufficient space.
+ * (XXX Large writes may be starved by small writes.)
+ *
+ * There is a potential deadlock problem with this solution,
+ * if a device_write from the default pager has to wait
+ * for the completion of a device_write which needs to wait
+ * for memory allocation. Hence, once device_write_get
+ * allocates space in device_io_map, no blocking memory
+ * allocations should happen until device_write_dealloc
+ * frees the space. (XXX A large write might starve
+ * a small write from the default pager.)
+ */
+// device_io_map->wait_for_space = TRUE;
+
+// io_inband_zone = zinit(sizeof(io_buf_ptr_inband_t), 0,
+// 1000 * sizeof(io_buf_ptr_inband_t),
+// 10 * sizeof(io_buf_ptr_inband_t),
+// FALSE,
+// "io inband read buffers");
+}
+
+/* Emulation-ops vtable wiring the ds_* dispatch layer to the Mach
+ device implementation above. NOTE(review): positional initializers
+ -- the NULL slots include write/write_inband (so
+ ds_device_write_inband will return D_INVALID_OPERATION for this
+ device class); verify the ordering against struct
+ device_emulation_ops in device_emul.h. */
+struct device_emulation_ops mach_device_emulation_ops =
+{
+ (void*) mach_device_reference,
+ (void*) mach_device_deallocate,
+ (void*) mach_convert_device_to_port,
+ device_open,
+ device_close,
+ NULL,
+ NULL,
+ device_read,
+ device_read_inband,
+ device_set_status,
+ device_get_status,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
diff --git a/pcnet32/ds_routines.h b/pcnet32/ds_routines.h
new file mode 100644
index 000000000..e314e80e5
--- /dev/null
+++ b/pcnet32/ds_routines.h
@@ -0,0 +1,55 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ *
+ * Device service utility routines.
+ */
+
+#ifndef DS_ROUTINES_H
+#define DS_ROUTINES_H
+
+#include <mach.h>
+
+#include "io_req.h"
+
+/*
+ * Map for device IO memory.
+ */
+//vm_map_t device_io_map;
+
+kern_return_t device_read_alloc(io_req_t, vm_size_t);
+kern_return_t device_write_get(io_req_t, boolean_t *);
+boolean_t device_write_dealloc(io_req_t);
+
+boolean_t ds_open_done(io_req_t);
+boolean_t ds_read_done(io_req_t);
+boolean_t ds_write_done(io_req_t);
+
+void iowait (io_req_t ior);
+
+#endif /* DS_ROUTINES_H */
diff --git a/pcnet32/if.h b/pcnet32/if.h
new file mode 100644
index 000000000..9a03f0a20
--- /dev/null
+++ b/pcnet32/if.h
@@ -0,0 +1,183 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the INET interface module.
+ *
+ * Version: @(#)if.h 1.0.2 04/18/93
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1982-1988
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_H
+#define _LINUX_IF_H
+
+#include <sys/socket.h> /* for "struct sockaddr" et al */
+
+/* Standard interface flags. */
+
+#ifdef MACH_INCLUDE
+
+#define LINUX_IFF_UP 0x1 /* interface is up */
+#define LINUX_IFF_BROADCAST 0x2 /* broadcast address valid */
+#define LINUX_IFF_DEBUG 0x4 /* turn on debugging */
+#define LINUX_IFF_LOOPBACK 0x8 /* is a loopback net */
+#define LINUX_IFF_POINTOPOINT 0x10 /* interface is has p-p link */
+#define LINUX_IFF_NOTRAILERS 0x20 /* avoid use of trailers */
+#define LINUX_IFF_RUNNING 0x40 /* resources allocated */
+#define LINUX_IFF_NOARP 0x80 /* no ARP protocol */
+#define LINUX_IFF_PROMISC 0x100 /* receive all packets */
+/* Not supported */
+#define LINUX_IFF_ALLMULTI 0x200 /* receive all multicast packets*/
+
+#define LINUX_IFF_MASTER 0x400 /* master of a load balancer */
+#define LINUX_IFF_SLAVE 0x800 /* slave of a load balancer */
+
+#define LINUX_IFF_MULTICAST 0x1000 /* Supports multicast */
+#define LINUX_IFF_SOFTHEADERS 0x2000 /* Device cannot construct headers
+ * until broadcast time. Therefore
+ * SOCK_PACKET must call header
+ * construction. Private flag.
+ * Never visible outside of kernel.
+ */
+
+#else /* !MACH_INCLUDE */
+
+#define IFF_UP 0x1 /* interface is up */
+#define IFF_BROADCAST 0x2 /* broadcast address valid */
+#define IFF_DEBUG 0x4 /* turn on debugging */
+#define IFF_LOOPBACK 0x8 /* is a loopback net */
+#define IFF_POINTOPOINT 0x10 /* interface is has p-p link */
+#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
+#define IFF_RUNNING 0x40 /* resources allocated */
+#define IFF_NOARP 0x80 /* no ARP protocol */
+#define IFF_PROMISC 0x100 /* receive all packets */
+/* Not supported */
+#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
+
+#define IFF_MASTER 0x400 /* master of a load balancer */
+#define IFF_SLAVE 0x800 /* slave of a load balancer */
+
+#define IFF_MULTICAST 0x1000 /* Supports multicast */
+#define IFF_SOFTHEADERS 0x2000 /* Device cannot construct headers
+ * until broadcast time. Therefore
+ * SOCK_PACKET must call header
+ * construction. Private flag.
+ * Never visible outside of kernel.
+ */
+#endif /* !MACH_INCLUDE */
+
+/*
+ * The ifaddr structure contains information about one address
+ * of an interface. They are maintained by the different address
+ * families, are allocated and attached when an address is set,
+ * and are linked together so all addresses for an interface can
+ * be located.
+ */
+
+/* One protocol address bound to an interface; addresses for an
+ interface are kept on the singly linked ifa_next list. The union
+ holds either the broadcast address or the point-to-point peer. */
+struct ifaddr
+{
+ struct sockaddr ifa_addr; /* address of interface */
+ union {
+ struct sockaddr ifu_broadaddr;
+ struct sockaddr ifu_dstaddr;
+ } ifa_ifu;
+ struct iface *ifa_ifp; /* back-pointer to interface */
+ struct ifaddr *ifa_next; /* next address for interface */
+};
+
+#define ifa_broadaddr ifa_ifu.ifu_broadaddr /* broadcast address */
+#define ifa_dstaddr ifa_ifu.ifu_dstaddr /* other end of link */
+
+/*
+ * Device mapping structure. I'd just gone off and designed a
+ * beautiful scheme using only loadable modules with arguments
+ * for driver options and along come the PCMCIA people 8)
+ *
+ * Ah well. The get() side of this is good for WDSETUP, and it'll
+ * be handy for debugging things. The set side is fine for now and
+ * being very small might be worth keeping for clean configuration.
+ */
+
+/* Hardware resource description of an interface (SIOCGIFMAP /
+ SIOCSIFMAP): memory window, I/O base, IRQ, DMA channel, port. */
+struct ifmap
+{
+ unsigned long mem_start;
+ unsigned long mem_end;
+ unsigned short base_addr;
+ unsigned char irq;
+ unsigned char dma;
+ unsigned char port;
+ /* 3 bytes spare */
+};
+
+/*
+ * Interface request structure used for socket
+ * ioctl's. All interface ioctl's must have parameter
+ * definitions which begin with ifr_name. The
+ * remainder may be interface specific.
+ */
+
+/* Interface-ioctl request: the interface name followed by a union of
+ every per-request payload (address, flags, metric, MTU, map, ...).
+ Accessed through the ifr_* convenience macros below. */
+struct ifreq
+{
+#define IFHWADDRLEN 6
+#define IFNAMSIZ 16
+ union
+ {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ } ifr_ifrn;
+
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short ifru_flags;
+ int ifru_metric;
+ int ifru_mtu;
+ struct ifmap ifru_map;
+ char ifru_slave[IFNAMSIZ]; /* Just fits the size */
+ caddr_t ifru_data;
+ } ifr_ifru;
+};
+
+#define ifr_name ifr_ifrn.ifrn_name /* interface name */
+#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
+#define ifr_addr ifr_ifru.ifru_addr /* address */
+#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-p lnk */
+#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */
+#define ifr_netmask ifr_ifru.ifru_netmask /* interface net mask */
+#define ifr_flags ifr_ifru.ifru_flags /* flags */
+#define ifr_metric ifr_ifru.ifru_metric /* metric */
+#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */
+#define ifr_map ifr_ifru.ifru_map /* device map */
+#define ifr_slave ifr_ifru.ifru_slave /* slave device */
+#define ifr_data ifr_ifru.ifru_data /* for use by interface */
+
+/*
+ * Structure used in SIOCGIFCONF request.
+ * Used to retrieve interface configuration
+ * for machine (useful for programs which
+ * must know all networks accessible).
+ */
+
+/* SIOCGIFCONF argument: caller supplies a buffer (ifc_buf) of
+ ifc_len bytes; it is returned filled with struct ifreq entries. */
+struct ifconf
+{
+ int ifc_len; /* size of buffer */
+ union
+ {
+ caddr_t ifcu_buf;
+ struct ifreq *ifcu_req;
+ } ifc_ifcu;
+};
+#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
+#define ifc_req ifc_ifcu.ifcu_req /* array of structures */
+
+#endif /* _LINUX_IF_H */
diff --git a/pcnet32/if_arp.h b/pcnet32/if_arp.h
new file mode 100644
index 000000000..5f8b23707
--- /dev/null
+++ b/pcnet32/if_arp.h
@@ -0,0 +1,132 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the ARP (RFC 826) protocol.
+ *
+ * Version: @(#)if_arp.h 1.0.2 08/12/96
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1986-1988
+ * Portions taken from the KA9Q/NOS (v2.00m PA0GRI) source.
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Florian La Roche,
+ * Jonathan Layes, <layes@loran.com>
+ * Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_ARP_H
+#define _LINUX_IF_ARP_H
+
+#include <sys/socket.h>
+
+#include "netdevice.h"
+
+/* ARP protocol HARDWARE identifiers. */
+#define ARPHRD_NETROM 0 /* from KA9Q: NET/ROM pseudo */
+#define ARPHRD_ETHER 1 /* Ethernet 10Mbps */
+#define ARPHRD_EETHER 2 /* Experimental Ethernet */
+#define ARPHRD_AX25 3 /* AX.25 Level 2 */
+#define ARPHRD_PRONET 4 /* PROnet token ring */
+#define ARPHRD_CHAOS 5 /* Chaosnet */
+#define ARPHRD_IEEE802 6 /* IEEE 802.2 Ethernet/TR/TB */
+#define ARPHRD_ARCNET 7 /* ARCnet */
+#define ARPHRD_APPLETLK 8 /* APPLEtalk */
+#define ARPHRD_DLCI 15 /* Frame Relay DLCI */
+#define ARPHRD_METRICOM 23 /* Metricom STRIP (new IANA id) */
+
+/* Dummy types for non ARP hardware */
+#define ARPHRD_SLIP 256
+#define ARPHRD_CSLIP 257
+#define ARPHRD_SLIP6 258
+#define ARPHRD_CSLIP6 259
+#define ARPHRD_RSRVD 260 /* Notional KISS type */
+#define ARPHRD_ADAPT 264
+#define ARPHRD_ROSE 270
+#define ARPHRD_PPP 512
+
+#define ARPHRD_TUNNEL 768 /* IPIP tunnel */
+#define ARPHRD_TUNNEL6 769 /* IPIP6 tunnel */
+#define ARPHRD_FRAD 770 /* Frame Relay Access Device */
+#define ARPHRD_SKIP 771 /* SKIP vif */
+#define ARPHRD_LOOPBACK 772 /* Loopback device */
+#define ARPHRD_LOCALTLK 773 /* Localtalk device */
+#define ARPHRD_FDDI 774 /* Fiber Distributed Data Interface */
+
+/* ARP protocol opcodes. */
+#define ARPOP_REQUEST 1 /* ARP request */
+#define ARPOP_REPLY 2 /* ARP reply */
+#define ARPOP_RREQUEST 3 /* RARP request */
+#define ARPOP_RREPLY 4 /* RARP reply */
+
+
+/* ARP ioctl request. */
+/* ARP-table ioctl argument (SIOCSARP etc.); arpreq_old is the
+ pre-arp_dev layout kept for binary compatibility. */
+struct arpreq {
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+ struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
+ char arp_dev[16];
+};
+
+struct arpreq_old {
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+ struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
+};
+
+/* ARP Flag values. */
+#define ATF_COM 0x02 /* completed entry (ha valid) */
+#define ATF_PERM 0x04 /* permanent entry */
+#define ATF_PUBL 0x08 /* publish entry */
+#define ATF_USETRAILERS 0x10 /* has requested trailers */
+#define ATF_NETMASK 0x20 /* want to use a netmask (only
+ for proxy entries) */
+
+/*
+ * This structure defines an ethernet arp header.
+ */
+
+/* Fixed (hardware-independent) portion of an ARP packet header, RFC
+ 826; the variable-length address fields that follow on the wire
+ are sketched in the #if 0 block. */
+struct arphdr
+{
+ unsigned short ar_hrd; /* format of hardware address */
+ unsigned short ar_pro; /* format of protocol address */
+ unsigned char ar_hln; /* length of hardware address */
+ unsigned char ar_pln; /* length of protocol address */
+ unsigned short ar_op; /* ARP opcode (command) */
+
+#if 0
+ /*
+ * Ethernet looks like this : This bit is variable sized however...
+ */
+ unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
+ unsigned char ar_sip[4]; /* sender IP address */
+ unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
+ unsigned char ar_tip[4]; /* target IP address */
+#endif
+
+};
+
+/* Support for the user space arp daemon, arpd */
+
+#define ARPD_UPDATE 0x01
+#define ARPD_LOOKUP 0x02
+#define ARPD_FLUSH 0x03
+
+/* Message exchanged with the user-space ARP daemon (arpd): an
+ ARPD_UPDATE/LOOKUP/FLUSH request plus the entry it refers to. */
+struct arpd_request
+{
+ unsigned short req; /* request type */
+ __u32 ip; /* ip address of entry */
+ unsigned long dev; /* Device entry is tied to */
+ unsigned long stamp;
+ unsigned long updated;
+ unsigned char ha[MAX_ADDR_LEN]; /* Hardware address */
+};
+
+#endif /* _LINUX_IF_ARP_H */
diff --git a/pcnet32/if_ether.h b/pcnet32/if_ether.h
new file mode 100644
index 000000000..dd09d8352
--- /dev/null
+++ b/pcnet32/if_ether.h
@@ -0,0 +1,119 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the Ethernet IEEE 802.3 interface.
+ *
+ * Version: @(#)if_ether.h 1.0.1a 02/08/94
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Alan Cox, <alan@cymru.net>
+ * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_IF_ETHER_H
+#define _LINUX_IF_ETHER_H
+
+/*
+ * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
+ * and FCS/CRC (frame check sequence).
+ */
+
+#define ETH_ALEN 6 /* Octets in one ethernet addr */
+#define ETH_HLEN 14 /* Total octets in header. */
+#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
+#define ETH_DATA_LEN 1500 /* Max. octets in payload */
+#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
+
+/*
+ * These are the defined Ethernet Protocol ID's.
+ */
+
+#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
+#define ETH_P_ECHO 0x0200 /* Ethernet Echo packet */
+#define ETH_P_PUP 0x0400 /* Xerox PUP packet */
+#define ETH_P_IP 0x0800 /* Internet Protocol packet */
+#define ETH_P_X25 0x0805 /* CCITT X.25 */
+#define ETH_P_ARP 0x0806 /* Address Resolution packet */
+#define ETH_P_BPQ 0x08FF /* G8BPQ AX.25 Ethernet Packet [ NOT AN OFFICIALLY REGISTERED ID ] */
+#define ETH_P_DEC 0x6000 /* DEC Assigned proto */
+#define ETH_P_DNA_DL 0x6001 /* DEC DNA Dump/Load */
+#define ETH_P_DNA_RC 0x6002 /* DEC DNA Remote Console */
+#define ETH_P_DNA_RT 0x6003 /* DEC DNA Routing */
+#define ETH_P_LAT 0x6004 /* DEC LAT */
+#define ETH_P_DIAG 0x6005 /* DEC Diagnostics */
+#define ETH_P_CUST 0x6006 /* DEC Customer use */
+#define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */
+#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
+#define ETH_P_ATALK 0x809B /* Appletalk DDP */
+#define ETH_P_AARP 0x80F3 /* Appletalk AARP */
+#define ETH_P_IPX 0x8137 /* IPX over DIX */
+#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
+
+/*
+ * Non DIX types. Won't clash for 1500 types.
+ */
+
+#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */
+#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */
+#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */
+#define ETH_P_802_2 0x0004 /* 802.2 frames */
+#define ETH_P_SNAP 0x0005 /* Internal only */
+#define ETH_P_DDCMP 0x0006 /* DEC DDCMP: Internal only */
+#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/
+#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */
+#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */
+#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/
+#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */
+
+/*
+ * This is an Ethernet frame header.
+ */
+
+/* On-the-wire Ethernet header: ETH_HLEN (6 + 6 + 2 = 14) octets.
+   NOTE(review): h_proto is presumably in network byte order, as is
+   conventional for the Ethernet type field -- confirm at use sites. */
+struct ethhdr
+{
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+ unsigned char h_source[ETH_ALEN]; /* source ether addr */
+ unsigned short h_proto; /* packet type ID field */
+};
+
+/*
+ * Ethernet statistics collection data.
+ */
+
+/* Per-interface packet and error counters, maintained by the driver
+   and reported to management tools. */
+struct enet_statistics
+{
+ int rx_packets; /* total packets received */
+ int tx_packets; /* total packets transmitted */
+ int rx_errors; /* bad packets received */
+ int tx_errors; /* packet transmit problems */
+ int rx_dropped; /* no space in linux buffers */
+ int tx_dropped; /* no space available in linux */
+ int multicast; /* multicast packets received */
+ int collisions;
+
+ /* detailed rx_errors: */
+ int rx_length_errors;
+ int rx_over_errors; /* receiver ring buff overflow */
+ int rx_crc_errors; /* recved pkt with crc error */
+ int rx_frame_errors; /* recv'd frame alignment error */
+ int rx_fifo_errors; /* recv'r fifo overrun */
+ int rx_missed_errors; /* receiver missed packet */
+
+ /* detailed tx_errors */
+ int tx_aborted_errors;
+ int tx_carrier_errors;
+ int tx_fifo_errors;
+ int tx_heartbeat_errors;
+ int tx_window_errors;
+};
+
+
+#endif /* _LINUX_IF_ETHER_H */
diff --git a/pcnet32/io_req.h b/pcnet32/io_req.h
new file mode 100644
index 000000000..df8d743c4
--- /dev/null
+++ b/pcnet32/io_req.h
@@ -0,0 +1,135 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 10/88
+ */
+
+#ifndef _IO_REQ_
+#define _IO_REQ_
+
+#include <mach.h>
+#include <cthreads.h>
+
+#include "dev_hdr.h"
+
+/*
+ * IO request element, queued on device for delayed replies.
+ */
+/* Handle type for an IO request; the structure follows. */
+typedef struct io_req *io_req_t;
+struct io_req {
+ struct io_req * io_next; /* next, ... */
+ struct io_req * io_prev; /* prev pointers: link in done,
+ deferred, or in-progress list */
+ mach_device_t io_device; /* pointer to open-device structure */
+ char * io_dev_ptr; /* pointer to driver structure -
+ filled in by driver if necessary */
+ int io_unit; /* unit number ('minor') of device */
+ int io_op; /* IO operation */
+ dev_mode_t io_mode; /* operation mode (wait, truncate) */
+ recnum_t io_recnum; /* starting record number for
+ random-access devices */
+
+ union io_un {
+ io_buf_ptr_t data; /* data, for IO requests */
+ } io_un;
+#define io_data io_un.data
+
+ long io_count; /* amount requested */
+ long io_alloc_size; /* amount allocated */
+ long io_residual; /* amount NOT done */
+ io_return_t io_error; /* error code */
+ /* call when done - returns TRUE if IO really finished */
+ boolean_t (*io_done)(io_req_t);
+ mach_port_t io_reply_port; /* reply port, for asynchronous
+ messages */
+ mach_msg_type_name_t io_reply_port_type;
+ /* send or send-once right? */
+ struct io_req * io_link; /* forward link (for driver header) */
+ struct io_req * io_rlink; /* reverse link (for driver header) */
+// vm_map_copy_t io_copy; /* vm_map_copy obj. for this op. */
+ long io_total; /* total op size, for write */
+ struct mutex io_req_lock;
+// decl_simple_lock_data(,io_req_lock)
+ /* Lock for this structure: a cthreads mutex in this
+ user-space port, replacing the kernel simple lock
+ commented out above */
+ long io_physrec; /* mapping to the physical block
+ number */
+ long io_rectotal; /* total number of blocks to move */
+};
+
+/*
+ * LOCKING NOTE: Operations on io_req's are in general single threaded by
+ * the invoking code, obviating the need for a lock. The usual IO_CALL
+ * path through the code is: Initiating thread hands io_req to device driver,
+ * driver passes it to io_done thread, io_done thread sends reply message. No
+ * locking is needed in this sequence. Unfortunately, a synchronous wait
+ * for a buffer requires a lock to avoid problems if the wait and interrupt
+ * happen simultaneously on different processors.
+ */
+
+#define ior_lock(ior) mutex_lock(&(ior)->io_req_lock)
+#define ior_unlock(ior) mutex_unlock(&(ior)->io_req_lock)
+
+/*
+ * Flags and operations
+ */
+
+#define IO_WRITE 0x00000000 /* operation is write */
+#define IO_READ 0x00000001 /* operation is read */
+#define IO_OPEN 0x00000002 /* operation is open */
+#define IO_DONE 0x00000100 /* operation complete */
+#define IO_ERROR 0x00000200 /* error on operation */
+#define IO_BUSY 0x00000400 /* operation in progress */
+#define IO_WANTED 0x00000800 /* wakeup when no longer BUSY */
+#define IO_BAD 0x00001000 /* bad disk block */
+#define IO_CALL 0x00002000 /* call io_done_thread when done */
+#define IO_INBAND 0x00004000 /* mig call was inband */
+#define IO_INTERNAL 0x00008000 /* internal, device-driver specific */
+#define IO_LOANED 0x00010000 /* ior loaned by another module */
+
+#define IO_SPARE_START 0x00020000 /* start of spare flags */
+
+/*
+ * Standard completion routine for io_requests.
+ */
+void iodone(io_req_t);
+
+/*
+ * Macros to allocate and free IORs - will convert to zones later.
+ *
+ * NOTE(review): the `size' argument is ignored -- a full struct io_req
+ * is always allocated -- and the malloc result is not checked before
+ * mutex_init dereferences it, so an out-of-memory condition crashes
+ * here; confirm that is acceptable or add a check.
+ */
+#define io_req_alloc(ior,size) \
+ MACRO_BEGIN \
+ (ior) = (io_req_t)malloc(sizeof(struct io_req)); \
+ mutex_init(&(ior)->io_req_lock); \
+ MACRO_END
+
+#define io_req_free(ior) \
+ (free(ior))
+
+
+//zone_t io_inband_zone; /* for inband reads */
+
+#endif /* _IO_REQ_ */
diff --git a/pcnet32/irq.c b/pcnet32/irq.c
new file mode 100644
index 000000000..f82e8b11c
--- /dev/null
+++ b/pcnet32/irq.c
@@ -0,0 +1,34 @@
+#include <error.h>
+
+#include <mach.h>
+#include <hurd.h>
+
+#include "netdevice.h"
+#include "device_U.h"
+#include "irq.h"
+
+extern mach_port_t master_device;
+
+/*
+ * Install the irq in the kernel.
+ *
+ * Asks the privileged master device port to deliver interrupt
+ * notifications for DEV->irq to this task's port for DEV.
+ * Returns the Mach error code from device_intr_notify (0 on success).
+ *
+ * NOTE(review): HANDLER and FLAGS are accepted for source
+ * compatibility with Linux drivers but are unused here -- the handler
+ * is presumably invoked by whatever loop receives the notification
+ * messages on the registered port; confirm against the message loop.
+ */
+int
+request_irq (struct linux_device *dev,
+ void (*handler) (int), unsigned long flags)
+{
+ return device_intr_notify (master_device, dev->irq, dev->dev_id,
+ ports_get_right (dev), MACH_MSG_TYPE_MAKE_SEND);
+}
+
+/*
+ * Deallocate an irq: detach the interrupt notification for DEV by
+ * re-registering with a null port.  Failures are reported but not
+ * fatal.
+ */
+void
+free_irq (struct linux_device *dev)
+{
+  error_t err = device_intr_notify (master_device, dev->irq, dev->dev_id,
+                                    MACH_PORT_NULL,
+                                    MACH_MSG_TYPE_MAKE_SEND);
+
+  if (err)
+    error (0, err, "device_intr_notify");
+}
diff --git a/pcnet32/irq.h b/pcnet32/irq.h
new file mode 100644
index 000000000..14c48e5ae
--- /dev/null
+++ b/pcnet32/irq.h
@@ -0,0 +1,27 @@
+#ifndef __IRQ_H__
+
+#define __IRQ_H__
+
+#include <device/device_types.h>
+#include <mach.h>
+
+#include "netdevice.h"
+
+void deliver_irq (int irq);
+
+typedef struct
+{
+ mach_msg_header_t irq_header;
+ mach_msg_type_t irq_type;
+ int irq;
+} mach_irq_notification_t;
+
+#define IRQ_NOTIFY_MSGH_SEQNO 0
+#define MACH_NOTIFY_IRQ 100
+
+int request_irq (struct linux_device *dev,
+ void (*handler) (int), unsigned long flags);
+
+void free_irq (struct linux_device *dev);
+
+#endif
diff --git a/pcnet32/kmem.c b/pcnet32/kmem.c
new file mode 100644
index 000000000..5bd6a02df
--- /dev/null
+++ b/pcnet32/kmem.c
@@ -0,0 +1,481 @@
+/*
+ * Linux memory allocation.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ *
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include "mach_U.h"
+#include <hurd.h>
+#include <cthreads.h>
+
+#include "util.h"
+#include "vm_param.h"
+
+extern int printf (const char *, ...);
+
+/* Amount of memory to reserve for Linux memory allocator.
+ We reserve 64K chunks to stay within DMA limits.
+ Increase MEM_CHUNKS if the kernel is running out of memory. */
+#define MEM_CHUNK_SIZE (64 * 1024)
+#define MEM_CHUNKS 7
+
+/* Minimum amount that linux_kmalloc will allocate. */
+#define MIN_ALLOC 12
+
+#ifndef NBPW
+#define NBPW 32
+#endif
+
+/* Memory block header. */
+struct blkhdr
+{
+ unsigned short free; /* 1 if block is free */
+ unsigned short size; /* size of block */
+};
+
+/* This structure heads a page allocated by linux_kmalloc. */
+struct pagehdr
+{
+ unsigned size; /* size (multiple of PAGE_SIZE) */
+ struct pagehdr *next; /* next header in list */
+};
+
+/* This structure describes a memory chunk. */
+struct chunkhdr
+{
+ vm_address_t start; /* start address */
+ vm_address_t pstart; /* start physical address */
+ vm_address_t end; /* end address */
+ unsigned long bitmap; /* busy/free bitmap of pages */
+};
+
+unsigned long __get_free_pages (unsigned long order, int dma);
+void free_pages (unsigned long addr, unsigned long order);
+
+static struct mutex mem_lock = MUTEX_INITIALIZER;
+
+/* Chunks from which pages are allocated. */
+static struct chunkhdr pages_free[MEM_CHUNKS];
+
+/* Memory list maintained by linux_kmalloc. */
+static struct pagehdr *memlist;
+
+/* Some statistics. */
+int num_block_coalesce = 0;
+int num_page_collect = 0;
+int linux_mem_avail;
+
+/* Translate a virtual address inside one of our DMA chunks to its
+   physical address.  Aborts if ADDR lies outside every chunk. */
+int virt_to_phys (vm_address_t addr)
+{
+  struct chunkhdr *ch;
+
+  for (ch = pages_free; ch < pages_free + MEM_CHUNKS; ch++)
+    if (ch->start <= addr && addr < ch->end)
+      return addr - ch->start + ch->pstart;
+
+  debug ("an address not in any chunks.");
+  abort ();
+}
+
+/* Initialize the Linux memory allocator.
+   Reserves MEM_CHUNKS chunks of MEM_CHUNK_SIZE bytes of DMA-capable
+   memory from the kernel, recording both the virtual and the physical
+   start of each chunk, and marks every page free in the per-chunk
+   bitmap. */
+void
+linux_kmem_init ()
+{
+ extern mach_port_t priv_host;
+ int i, j;
+
+ for (i = 0; i < MEM_CHUNKS; i++)
+ {
+ error_t err;
+
+ /* Allocate memory. */
+ err = vm_dma_buff_alloc (priv_host, mach_task_self (),
+ MEM_CHUNK_SIZE, &pages_free[i].start,
+ &pages_free[i].pstart);
+ if (err)
+ abort ();
+
+ assert (pages_free[i].start);
+// assert ((pages_free[i].start & 0xffff) == 0);
+
+// /* Sanity check: ensure pages are contiguous and within DMA limits. */
+// for (p = pages, j = 0; j < MEM_CHUNK_SIZE - PAGE_SIZE; j += PAGE_SIZE)
+// {
+// assert (p->phys_addr < 16 * 1024 * 1024);
+// assert (p->phys_addr + PAGE_SIZE
+// == ((vm_page_t) p->pageq.next)->phys_addr);
+//
+// p = (vm_page_t) p->pageq.next;
+// }
+
+ pages_free[i].end = pages_free[i].start + MEM_CHUNK_SIZE;
+ /* NOTE(review): this compares the *virtual* end address against
+ the 16 MB ISA DMA limit; presumably the physical range
+ (pstart + MEM_CHUNK_SIZE) is what must stay below the limit --
+ confirm. */
+ assert (pages_free[i].end <= 16 * 1024 * 1024);
+
+ /* Initialize free page bitmap. */
+ pages_free[i].bitmap = 0;
+ j = MEM_CHUNK_SIZE >> PAGE_SHIFT;
+ while (--j >= 0)
+ pages_free[i].bitmap |= 1 << j;
+ }
+
+ linux_mem_avail = (MEM_CHUNKS * MEM_CHUNK_SIZE) >> PAGE_SHIFT;
+}
+
+/* Return the smallest ORDER such that (PAGE_SIZE << ORDER) >= SIZE. */
+static unsigned long
+get_page_order (int size)
+{
+  unsigned long order = 0;
+
+  while ((PAGE_SIZE << order) < size)
+    order++;
+
+  return order;
+}
+
+#ifdef LINUX_DEV_DEBUG
+/* Debug-only consistency walk over the kmalloc page list: panics if a
+   page header is misaligned or if the block headers within a page run
+   no longer sum to the run's size.  Compiled to a no-op unless
+   LINUX_DEV_DEBUG is defined. */
+static void
+check_page_list (int line)
+{
+ unsigned size;
+ struct pagehdr *ph;
+ struct blkhdr *bh;
+
+ for (ph = memlist; ph; ph = ph->next)
+ {
+ if ((int) ph & PAGE_MASK)
+ panic ("%s:%d: page header not aligned", __FILE__, line);
+
+ size = 0;
+ bh = (struct blkhdr *) (ph + 1);
+ while (bh < (struct blkhdr *) ((void *) ph + ph->size))
+ {
+ size += bh->size + sizeof (struct blkhdr);
+ bh = (void *) (bh + 1) + bh->size;
+ }
+
+ if (size + sizeof (struct pagehdr) != ph->size)
+ panic ("%s:%d: memory list destroyed", __FILE__, line);
+ }
+}
+#else
+#define check_page_list(line)
+#endif
+
+/* Merge adjacent free blocks in the memory list.
+   For every page run, walk the block headers in address order and fold
+   each free block's free successors into it, reclaiming the absorbed
+   headers as payload.  Both callers in this file invoke this with
+   mem_lock held. */
+static void
+coalesce_blocks ()
+{
+ struct pagehdr *ph;
+ struct blkhdr *bh, *bhp, *ebh;
+
+ num_block_coalesce++;
+
+ for (ph = memlist; ph; ph = ph->next)
+ {
+ bh = (struct blkhdr *) (ph + 1);
+ ebh = (struct blkhdr *) ((void *) ph + ph->size);
+ while (1)
+ {
+ /* Skip busy blocks. */
+ while (bh < ebh && !bh->free)
+ bh = (struct blkhdr *) ((void *) (bh + 1) + bh->size);
+ if (bh == ebh)
+ break;
+
+ /* Merge adjacent free blocks. */
+ while (1)
+ {
+ /* bhp is the header immediately after bh's payload. */
+ bhp = (struct blkhdr *) ((void *) (bh + 1) + bh->size);
+ if (bhp == ebh)
+ {
+ bh = bhp;
+ break;
+ }
+ if (!bhp->free)
+ {
+ /* Successor busy: resume scanning after it. */
+ bh = (struct blkhdr *) ((void *) (bhp + 1) + bhp->size);
+ break;
+ }
+ /* Absorb the free successor (payload plus its header). */
+ bh->size += bhp->size + sizeof (struct blkhdr);
+ }
+ }
+ }
+}
+
+/* Allocate SIZE bytes of memory.
+   The PRIORITY parameter specifies various flags
+   such as DMA, atomicity, etc. It is not used by Mach.
+   Strategy: first-fit search of the existing page list; on failure,
+   coalesce free blocks once and retry; on a second failure, grab fresh
+   pages from __get_free_pages.  Returns NULL only if the page
+   allocator is exhausted. */
+void *
+linux_kmalloc (unsigned int size, int priority)
+{
+ int order, coalesced = 0;
+ struct pagehdr *ph;
+ struct blkhdr *bh, *new_bh;
+
+ /* Round the request up to MIN_ALLOC and int alignment. */
+ if (size < MIN_ALLOC)
+ size = MIN_ALLOC;
+ else
+ size = (size + sizeof (int) - 1) & ~(sizeof (int) - 1);
+
+ assert (size <= (MEM_CHUNK_SIZE
+ - sizeof (struct pagehdr)
+ - sizeof (struct blkhdr)));
+
+ mutex_lock (&mem_lock);
+
+again:
+ check_page_list (__LINE__);
+
+ /* Walk the page list and find the first free block with size
+ greater than or equal to the one required. */
+ for (ph = memlist; ph; ph = ph->next)
+ {
+ bh = (struct blkhdr *) (ph + 1);
+ while (bh < (struct blkhdr *) ((void *) ph + ph->size))
+ {
+ if (bh->free && bh->size >= size)
+ {
+ bh->free = 0;
+ if (bh->size - size >= MIN_ALLOC + sizeof (struct blkhdr))
+ {
+ /* Split the current block and create a new free block. */
+ new_bh = (void *) (bh + 1) + size;
+ new_bh->free = 1;
+ new_bh->size = bh->size - size - sizeof (struct blkhdr);
+ bh->size = size;
+ }
+
+ check_page_list (__LINE__);
+
+ mutex_unlock (&mem_lock);
+ return bh + 1;
+ }
+ bh = (void *) (bh + 1) + bh->size;
+ }
+ }
+
+ check_page_list (__LINE__);
+
+ /* Allocation failed; coalesce free blocks and try again. */
+ if (!coalesced)
+ {
+ coalesce_blocks ();
+ coalesced = 1;
+ goto again;
+ }
+
+ /* Allocate more pages. */
+ order = get_page_order (size
+ + sizeof (struct pagehdr)
+ + sizeof (struct blkhdr));
+ ph = (struct pagehdr *) __get_free_pages (order, ~0UL);
+ if (!ph)
+ {
+ mutex_unlock (&mem_lock);
+ return NULL;
+ }
+
+ /* Thread the fresh page run onto the list with one block covering
+ the whole run, then split off the tail if big enough. */
+ ph->size = PAGE_SIZE << order;
+ ph->next = memlist;
+ memlist = ph;
+ bh = (struct blkhdr *) (ph + 1);
+ bh->free = 0;
+ bh->size = ph->size - sizeof (struct pagehdr) - sizeof (struct blkhdr);
+ if (bh->size - size >= MIN_ALLOC + sizeof (struct blkhdr))
+ {
+ new_bh = (void *) (bh + 1) + size;
+ new_bh->free = 1;
+ new_bh->size = bh->size - size - sizeof (struct blkhdr);
+ bh->size = size;
+ }
+
+ check_page_list (__LINE__);
+
+ mutex_unlock (&mem_lock);
+ return bh + 1;
+}
+
+/* Free memory P previously allocated by linux_kmalloc.
+   Only marks the block free; merging with neighbors is deferred until
+   coalesce_blocks runs on a later failed allocation.  Asserts that P
+   lies inside the kmalloc page list and looks like a live block. */
+void
+linux_kfree (void *p)
+{
+ struct blkhdr *bh;
+ struct pagehdr *ph;
+
+ assert (((int) p & (sizeof (int) - 1)) == 0);
+
+ mutex_lock (&mem_lock);
+
+ check_page_list (__LINE__);
+
+ /* Find the page run containing P. */
+ for (ph = memlist; ph; ph = ph->next)
+ if (p >= (void *) ph && p < (void *) ph + ph->size)
+ break;
+
+ assert (ph);
+
+ /* The block header sits immediately before the payload. */
+ bh = (struct blkhdr *) p - 1;
+
+ assert (!bh->free);
+ assert (bh->size >= MIN_ALLOC);
+ assert ((bh->size & (sizeof (int) - 1)) == 0);
+
+ bh->free = 1;
+
+ check_page_list (__LINE__);
+
+ mutex_unlock (&mem_lock);
+}
+
+/* Free any pages that are not in use.
+   Called by __get_free_pages when pages are running low.
+   Runs with mem_lock held (see the NOTE on __get_free_pages), so pages
+   are returned through free_pages_locked, NOT the public free_pages --
+   the old code called free_pages here, which re-took mem_lock and
+   self-deadlocked on the non-recursive cthreads mutex. */
+static void free_pages_locked (unsigned long addr, unsigned long order);
+
+static void
+collect_kmalloc_pages ()
+{
+  struct blkhdr *bh;
+  struct pagehdr *ph, **prev_ph;
+
+  check_page_list (__LINE__);
+
+  coalesce_blocks ();
+
+  check_page_list (__LINE__);
+
+  ph = memlist;
+  prev_ph = &memlist;
+  while (ph)
+    {
+      bh = (struct blkhdr *) (ph + 1);
+      if (bh->free && (void *) (bh + 1) + bh->size == (void *) ph + ph->size)
+	{
+	  /* The run holds a single free block covering the whole run:
+	     unlink it from the kmalloc list and give the pages back. */
+	  *prev_ph = ph->next;
+	  free_pages_locked ((unsigned long) ph, get_page_order (ph->size));
+	  ph = *prev_ph;
+	}
+      else
+	{
+	  prev_ph = &ph->next;
+	  ph = ph->next;
+	}
+    }
+
+  check_page_list (__LINE__);
+}
+
+/* Allocate 2**ORDER physically contiguous pages and return their
+   (virtual) start address, or 0 on exhaustion.
+   PRIORITY and DMA are not used in Mach.
+   NOTE: mem_lock has been held.
+
+   XXX: This needs to be dynamic. To do that we need to make
+   the Mach page manipulation routines interrupt safe and they
+   must provide machine dependent hooks. */
+unsigned long
+__get_free_pages (unsigned long order, int dma)
+{
+  int i, pages_collected = 0;
+  unsigned bits, off, j, len;
+
+  assert ((PAGE_SIZE << order) <= MEM_CHUNK_SIZE);
+
+  /* Construct bitmap of contiguous pages. */
+  bits = 0;
+  j = 0;
+  len = 0;
+  while (len < (PAGE_SIZE << order))
+    {
+      bits |= 1 << j++;
+      len += PAGE_SIZE;
+    }
+
+again:
+
+  /* Search each chunk for the required number of contiguous pages. */
+  for (i = 0; i < MEM_CHUNKS; i++)
+    {
+      off = 0;
+      j = bits;
+      while (MEM_CHUNK_SIZE - off >= (PAGE_SIZE << order))
+	{
+	  if ((pages_free[i].bitmap & j) == j)
+	    {
+	      pages_free[i].bitmap &= ~j;
+	      /* Account in pages: 2**ORDER pages were taken.  (The old
+		 code subtracted ORDER + 1, which is wrong for
+		 ORDER >= 2; linux_mem_avail is initialized in page
+		 units by linux_kmem_init.) */
+	      linux_mem_avail -= 1 << order;
+	      return pages_free[i].start + off;
+	    }
+	  j <<= 1;
+	  off += PAGE_SIZE;
+	}
+    }
+
+  /* Allocation failed; collect kmalloc and buffer pages
+     and try again. */
+  if (!pages_collected)
+    {
+      num_page_collect++;
+      collect_kmalloc_pages ();
+      pages_collected = 1;
+      goto again;
+    }
+
+  printf ("%s:%d: __get_free_pages: ran out of pages\n", __FILE__, __LINE__);
+
+  return 0;
+}
+
+/* Worker for free_pages: return 2**ORDER contiguous pages starting at
+   ADDR to their chunk's bitmap.  The caller must hold mem_lock. */
+static void
+free_pages_locked (unsigned long addr, unsigned long order)
+{
+  int i;
+  unsigned bits, len, j;
+
+  assert ((addr & PAGE_MASK) == 0);
+
+  /* Find the chunk the pages belong to. */
+  for (i = 0; i < MEM_CHUNKS; i++)
+    if (addr >= pages_free[i].start && addr < pages_free[i].end)
+      break;
+
+  assert (i < MEM_CHUNKS);
+
+  /* Construct bitmap of contiguous pages. */
+  len = 0;
+  j = 0;
+  bits = 0;
+  while (len < (PAGE_SIZE << order))
+    {
+      bits |= 1 << j++;
+      len += PAGE_SIZE;
+    }
+  bits <<= (addr - pages_free[i].start) >> PAGE_SHIFT;
+
+  /* Freeing pages that are already free indicates corruption. */
+  assert ((pages_free[i].bitmap & bits) == 0);
+
+  pages_free[i].bitmap |= bits;
+  /* Account in pages, matching __get_free_pages. */
+  linux_mem_avail += 1 << order;
+}
+
+/* Free 2**ORDER physically contiguous pages starting at address ADDR.
+   Public entry point: acquires mem_lock itself; internal callers that
+   already hold the lock use free_pages_locked directly. */
+void
+free_pages (unsigned long addr, unsigned long order)
+{
+  mutex_lock (&mem_lock);
+  free_pages_locked (addr, order);
+  mutex_unlock (&mem_lock);
+}
diff --git a/pcnet32/linux-types.h b/pcnet32/linux-types.h
new file mode 100644
index 000000000..4a515e177
--- /dev/null
+++ b/pcnet32/linux-types.h
@@ -0,0 +1,39 @@
+#ifndef _I386_TYPES_H
+#define _I386_TYPES_H
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+#define BITS_PER_LONG 32
+
+#endif
diff --git a/pcnet32/mach.defs b/pcnet32/mach.defs
new file mode 100644
index 000000000..764bd4510
--- /dev/null
+++ b/pcnet32/mach.defs
@@ -0,0 +1,779 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Matchmaker definitions file for Mach kernel interface.
+ */
+
+#ifdef MACH_KERNEL
+simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
+#endif /* MACH_KERNEL */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+ mach 2000;
+
+#ifdef KERNEL_USER
+userprefix r_;
+#endif /* KERNEL_USER */
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+skip; /* old port_allocate */
+skip; /* old port_deallocate */
+skip; /* old port_enable */
+skip; /* old port_disable */
+skip; /* old port_select */
+skip; /* old port_set_backlog */
+skip; /* old port_status */
+
+/*
+ * Create a new task with an empty set of IPC rights,
+ * and having an address space constructed from the
+ * target task (or empty, if inherit_memory is FALSE).
+ */
+routine task_create(
+ target_task : task_t;
+ inherit_memory : boolean_t;
+ out child_task : task_t);
+
+/*
+ * Destroy the target task, causing all of its threads
+ * to be destroyed, all of its IPC rights to be deallocated,
+ * and all of its address space to be deallocated.
+ */
+routine task_terminate(
+ target_task : task_t);
+
+/*
+ * Get user-level handler entry points for all
+ * emulated system calls.
+ */
+routine task_get_emulation_vector(
+ task : task_t;
+ out vector_start : int;
+ out emulation_vector: emulation_vector_t);
+
+/*
+ * Establish user-level handlers for the specified
+ * system calls. Non-emulated system calls are specified
+ * with emulation_vector[i] == EML_ROUTINE_NULL.
+ */
+routine task_set_emulation_vector(
+ task : task_t;
+ vector_start : int;
+ emulation_vector: emulation_vector_t);
+
+
+/*
+ * Returns the set of threads belonging to the target task.
+ */
+routine task_threads(
+ target_task : task_t;
+ out thread_list : thread_array_t);
+
+/*
+ * Returns information about the target task.
+ */
+routine task_info(
+ target_task : task_t;
+ flavor : int;
+ out task_info_out : task_info_t, CountInOut);
+
+
+skip; /* old task_status */
+skip; /* old task_set_notify */
+skip; /* old thread_create */
+
+/*
+ * Destroy the target thread.
+ */
+routine thread_terminate(
+ target_thread : thread_t);
+
+/*
+ * Return the selected state information for the target
+ * thread. If the thread is currently executing, the results
+ * may be stale. [Flavor THREAD_STATE_FLAVOR_LIST provides a
+ * list of valid flavors for the target thread.]
+ */
+routine thread_get_state(
+ target_thread : thread_t;
+ flavor : int;
+ out old_state : thread_state_t, CountInOut);
+
+/*
+ * Set the selected state information for the target thread.
+ * If the thread is currently executing, the state change
+ * may be ill-defined.
+ */
+routine thread_set_state(
+ target_thread : thread_t;
+ flavor : int;
+ new_state : thread_state_t);
+
+/*
+ * Returns information about the target thread.
+ */
+routine thread_info(
+ target_thread : thread_t;
+ flavor : int;
+ out thread_info_out : thread_info_t, CountInOut);
+
+skip; /* old thread_mutate */
+
+/*
+ * Allocate zero-filled memory in the address space
+ * of the target task, either at the specified address,
+ * or wherever space can be found (if anywhere is TRUE),
+ * of the specified size. The address at which the
+ * allocation actually took place is returned.
+ */
+#ifdef EMULATOR
+skip; /* the emulator redefines vm_allocate using vm_map */
+#else /* EMULATOR */
+routine vm_allocate(
+ target_task : vm_task_t;
+ inout address : vm_address_t;
+ size : vm_size_t;
+ anywhere : boolean_t);
+#endif /* EMULATOR */
+
+skip; /* old vm_allocate_with_pager */
+
+/*
+ * Deallocate the specified range from the virtual
+ * address space of the target task.
+ */
+routine vm_deallocate(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t);
+
+/*
+ * Set the current or maximum protection attribute
+ * for the specified range of the virtual address
+ * space of the target task. The current protection
+ * limits the memory access rights of threads within
+ * the task; the maximum protection limits the accesses
+ * that may be given in the current protection.
+ * Protections are specified as a set of {read, write, execute}
+ * *permissions*.
+ */
+routine vm_protect(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ set_maximum : boolean_t;
+ new_protection : vm_prot_t);
+
+/*
+ * Set the inheritance attribute for the specified range
+ * of the virtual address space of the target task.
+ * The inheritance value is one of {none, copy, share}, and
+ * specifies how the child address space should acquire
+ * this memory at the time of a task_create call.
+ */
+routine vm_inherit(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ new_inheritance : vm_inherit_t);
+
+/*
+ * Returns the contents of the specified range of the
+ * virtual address space of the target task. [The
+ * range must be aligned on a virtual page boundary,
+ * and must be a multiple of pages in extent. The
+ * protection on the specified range must permit reading.]
+ */
+routine vm_read(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ out data : pointer_t);
+
+/*
+ * Writes the contents of the specified range of the
+ * virtual address space of the target task. [The
+ * range must be aligned on a virtual page boundary,
+ * and must be a multiple of pages in extent. The
+ * protection on the specified range must permit writing.]
+ */
+routine vm_write(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ data : pointer_t);
+
+/*
+ * Copy the contents of the source range of the virtual
+ * address space of the target task to the destination
+ * range in that same address space. [Both of the
+ * ranges must be aligned on a virtual page boundary,
+ * and must be multiples of pages in extent. The
+ * protection on the source range must permit reading,
+ * and the protection on the destination range must
+ * permit writing.]
+ */
+routine vm_copy(
+ target_task : vm_task_t;
+ source_address : vm_address_t;
+ size : vm_size_t;
+ dest_address : vm_address_t);
+
+/*
+ * Returns information about the contents of the virtual
+ * address space of the target task at the specified
+ * address. The returned protection, inheritance, sharing
+ * and memory object values apply to the entire range described
+ * by the address range returned; the memory object offset
+ * corresponds to the beginning of the address range.
+ * [If the specified address is not allocated, the next
+ * highest address range is described. If no addresses beyond
+ * the one specified are allocated, the call returns KERN_NO_SPACE.]
+ */
+routine vm_region(
+ target_task : vm_task_t;
+ inout address : vm_address_t;
+ out size : vm_size_t;
+ out protection : vm_prot_t;
+ out max_protection : vm_prot_t;
+ out inheritance : vm_inherit_t;
+ out is_shared : boolean_t;
+ /* avoid out-translation of the argument */
+ out object_name : memory_object_name_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t;
+ out offset : vm_offset_t);
+
+/*
+ * Return virtual memory statistics for the host
+ * on which the target task resides. [Note that the
+ * statistics are not specific to the target task.]
+ */
+routine vm_statistics(
+ target_task : vm_task_t;
+ out vm_stats : vm_statistics_data_t);
+
+skip; /* old task_by_u*x_pid */
+skip; /* old vm_pageable */
+
+/*
+ * Stash a handful of ports for the target task; child
+ * tasks inherit this stash at task_create time.
+ */
+routine mach_ports_register(
+ target_task : task_t;
+ init_port_set : mach_port_array_t =
+ ^array[] of mach_port_t);
+
+/*
+ * Retrieve the stashed ports for the target task.
+ */
+routine mach_ports_lookup(
+ target_task : task_t;
+ out init_port_set : mach_port_array_t =
+ ^array[] of mach_port_t);
+
+skip; /* old u*x_pid */
+skip; /* old netipc_listen */
+skip; /* old netipc_ignore */
+
+/*
+ * Provide the data contents of a range of the given memory
+ * object, with the access restriction specified. [Only
+ * whole virtual pages of data can be accepted; partial pages
+ * will be discarded. Data should be provided on request, but
+ * may be provided in advance as desired. When data already
+ * held by this kernel is provided again, the new data is ignored.
+ * The access restriction is the subset of {read, write, execute}
+ * which are prohibited. The kernel may not provide any data (or
+ * protection) consistency among pages with different virtual page
+ * alignments within the same object.]
+ */
+simpleroutine memory_object_data_provided(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ data : pointer_t;
+ lock_value : vm_prot_t);
+
+/*
+ * Indicate that a range of the given temporary memory object does
+ * not exist, and that the backing memory object should be used
+ * instead (or zero-fill memory be used, if no backing object exists).
+ * [This call is intended for use only by the default memory manager.
+ * It should not be used to indicate a real error --
+ * memory_object_data_error should be used for that purpose.]
+ */
+simpleroutine memory_object_data_unavailable(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ size : vm_size_t);
+
+/*
+ * Retrieves the attributes currently associated with
+ * a memory object.
+ */
+routine memory_object_get_attributes(
+ memory_control : memory_object_control_t;
+ out object_ready : boolean_t;
+ out may_cache : boolean_t;
+ out copy_strategy : memory_object_copy_strategy_t);
+
+/*
+ * Sets the default memory manager, the port to which
+ * newly-created temporary memory objects are delivered.
+ * [See (memory_object_default)memory_object_create.]
+ * The old memory manager port is returned.
+ */
+routine vm_set_default_memory_manager(
+ host_priv : host_priv_t;
+ inout default_manager : mach_port_make_send_t);
+
+skip; /* old pager_flush_request */
+
+/*
+ * Control use of the data associated with the given
+ * memory object. For each page in the given range,
+ * perform the following operations, in order:
+ * 1) restrict access to the page (disallow
+ * forms specified by "prot");
+ * 2) write back modifications (if "should_return"
+ * is RETURN_DIRTY and the page is dirty, or
+ * "should_return" is RETURN_ALL and the page
+ * is either dirty or precious); and,
+ * 3) flush the cached copy (if "should_flush"
+ * is asserted).
+ * The set of pages is defined by a starting offset
+ * ("offset") and size ("size"). Only pages with the
+ * same page alignment as the starting offset are
+ * considered.
+ *
+ * A single acknowledgement is sent (to the "reply_to"
+ * port) when these actions are complete.
+ *
+ * There are two versions of this routine because IPC distinguishes
+ * between booleans and integers (a 2-valued integer is NOT a
+ * boolean). The new routine is backwards compatible at the C
+ * language interface.
+ */
+simpleroutine xxx_memory_object_lock_request(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ size : vm_size_t;
+ should_clean : boolean_t;
+ should_flush : boolean_t;
+ lock_value : vm_prot_t;
+ reply_to : mach_port_t =
+ MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+
+
+simpleroutine memory_object_lock_request(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ size : vm_size_t;
+ should_return : memory_object_return_t;
+ should_flush : boolean_t;
+ lock_value : vm_prot_t;
+ reply_to : mach_port_t =
+ MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+
+/* obsolete */
+routine xxx_task_get_emulation_vector(
+ task : task_t;
+ out vector_start : int;
+ out emulation_vector: xxx_emulation_vector_t, IsLong);
+
+/* obsolete */
+routine xxx_task_set_emulation_vector(
+ task : task_t;
+ vector_start : int;
+ emulation_vector: xxx_emulation_vector_t, IsLong);
+
+/*
+ * Returns information about the host on which the
+ * target object resides. [This object may be
+ * a task, thread, or memory_object_control port.]
+ */
+routine xxx_host_info(
+ target_task : mach_port_t;
+ out info : machine_info_data_t);
+
+/*
+ * Returns information about a particular processor on
+ * the host on which the target task resides.
+ */
+routine xxx_slot_info(
+ target_task : task_t;
+ slot : int;
+ out info : machine_slot_data_t);
+
+/*
+ * Performs control operations (currently only
+ * turning off or on) on a particular processor on
+ * the host on which the target task resides.
+ */
+routine xxx_cpu_control(
+ target_task : task_t;
+ cpu : int;
+ running : boolean_t);
+
+skip; /* old thread_statistics */
+skip; /* old task_statistics */
+skip; /* old netport_init */
+skip; /* old netport_enter */
+skip; /* old netport_remove */
+skip; /* old thread_set_priority */
+
+/*
+ * Increment the suspend count for the target task.
+ * No threads within a task may run when the suspend
+ * count for that task is non-zero.
+ */
+routine task_suspend(
+ target_task : task_t);
+
+/*
+ * Decrement the suspend count for the target task,
+ * if the count is currently non-zero. If the resulting
+ * suspend count is zero, then threads within the task
+ * that also have non-zero suspend counts may execute.
+ */
+routine task_resume(
+ target_task : task_t);
+
+/*
+ * Returns the current value of the selected special port
+ * associated with the target task.
+ */
+routine task_get_special_port(
+ task : task_t;
+ which_port : int;
+ out special_port : mach_port_t);
+
+/*
+ * Set one of the special ports associated with the
+ * target task.
+ */
+routine task_set_special_port(
+ task : task_t;
+ which_port : int;
+ special_port : mach_port_t);
+
+/* obsolete */
+routine xxx_task_info(
+ target_task : task_t;
+ flavor : int;
+ out task_info_out : task_info_t, IsLong);
+
+
+/*
+ * Create a new thread within the target task, returning
+ * the port representing that new thread. The
+ * initial execution state of the thread is undefined.
+ */
+routine thread_create(
+ parent_task : task_t;
+ out child_thread : thread_t);
+
+/*
+ * Increment the suspend count for the target thread.
+ * Once this call has completed, the thread will not
+ * execute any further user or meta- instructions.
+ * Once suspended, a thread may not execute again until
+ * its suspend count is zero, and the suspend count
+ * for its task is also zero.
+ */
+routine thread_suspend(
+ target_thread : thread_t);
+
+/*
+ * Decrement the suspend count for the target thread,
+ * if that count is not already zero.
+ */
+routine thread_resume(
+ target_thread : thread_t);
+
+/*
+ * Cause any user or meta- instructions currently being
+ * executed by the target thread to be aborted. [Meta-
+ * instructions consist of the basic traps for IPC
+ * (e.g., msg_send, msg_receive) and self-identification
+ * (e.g., task_self, thread_self, thread_reply). Calls
+ * described by MiG interfaces are not meta-instructions
+ * themselves.]
+ */
+routine thread_abort(
+ target_thread : thread_t);
+
+/* obsolete */
+routine xxx_thread_get_state(
+ target_thread : thread_t;
+ flavor : int;
+ out old_state : thread_state_t, IsLong);
+
+/* obsolete */
+routine xxx_thread_set_state(
+ target_thread : thread_t;
+ flavor : int;
+ new_state : thread_state_t, IsLong);
+
+/*
+ * Returns the current value of the selected special port
+ * associated with the target thread.
+ */
+routine thread_get_special_port(
+ thread : thread_t;
+ which_port : int;
+ out special_port : mach_port_t);
+
+/*
+ * Set one of the special ports associated with the
+ * target thread.
+ */
+routine thread_set_special_port(
+ thread : thread_t;
+ which_port : int;
+ special_port : mach_port_t);
+
+/* obsolete */
+routine xxx_thread_info(
+ target_thread : thread_t;
+ flavor : int;
+ out thread_info_out : thread_info_t, IsLong);
+
+/*
+ * Establish a user-level handler for the specified
+ * system call.
+ */
+routine task_set_emulation(
+ target_port : task_t;
+ routine_entry_pt: vm_address_t;
+ routine_number : int);
+
+/*
+ * Establish restart pc for interrupted atomic sequences.
+ * This reuses the message number for the old task_get_io_port.
+ * See task_info.h for description of flavors.
+ *
+ */
+routine task_ras_control(
+ target_task : task_t;
+ basepc : vm_address_t;
+ boundspc : vm_address_t;
+ flavor : int);
+
+
+
+skip; /* old host_ipc_statistics */
+skip; /* old port_names */
+skip; /* old port_type */
+skip; /* old port_rename */
+skip; /* old port_allocate */
+skip; /* old port_deallocate */
+skip; /* old port_set_backlog */
+skip; /* old port_status */
+skip; /* old port_set_allocate */
+skip; /* old port_set_deallocate */
+skip; /* old port_set_add */
+skip; /* old port_set_remove */
+skip; /* old port_set_status */
+skip; /* old port_insert_send */
+skip; /* old port_extract_send */
+skip; /* old port_insert_receive */
+skip; /* old port_extract_receive */
+
+/*
+ * Map a user-defined memory object into the virtual address
+ * space of the target task. If desired (anywhere is TRUE),
+ * the kernel will find a suitable address range of the
+ * specified size; else, the specific address will be allocated.
+ *
+ * The beginning address of the range will be aligned on a virtual
+ * page boundary, be at or beyond the address specified, and
+ * meet the mask requirements (bits turned on in the mask must not
+ * be turned on in the result); the size of the range, in bytes,
+ * will be rounded up to an integral number of virtual pages.
+ *
+ * The memory in the resulting range will be associated with the
+ * specified memory object, with the beginning of the memory range
+ * referring to the specified offset into the memory object.
+ *
+ * The mapping will take the current and maximum protections and
+ * the inheritance attributes specified; see the vm_protect and
+ * vm_inherit calls for a description of these attributes.
+ *
+ * If desired (copy is TRUE), the memory range will be filled
+ * with a copy of the data from the memory object; this copy will
+ * be private to this mapping in this target task. Otherwise,
+ * the memory in this mapping will be shared with other mappings
+ * of the same memory object at the same offset (in this task or
+ * in other tasks). [The Mach kernel only enforces shared memory
+ * consistency among mappings on one host with similar page alignments.
+ * The user-defined memory manager for this object is responsible
+ * for further consistency.]
+ */
+#ifdef EMULATOR
+routine htg_vm_map(
+ target_task : vm_task_t;
+ ureplyport reply_port : mach_port_make_send_once_t;
+ inout address : vm_address_t;
+ size : vm_size_t;
+ mask : vm_address_t;
+ anywhere : boolean_t;
+ memory_object : memory_object_t;
+ offset : vm_offset_t;
+ copy : boolean_t;
+ cur_protection : vm_prot_t;
+ max_protection : vm_prot_t;
+ inheritance : vm_inherit_t);
+#else /* EMULATOR */
+routine vm_map(
+ target_task : vm_task_t;
+ inout address : vm_address_t;
+ size : vm_size_t;
+ mask : vm_address_t;
+ anywhere : boolean_t;
+ memory_object : memory_object_t;
+ offset : vm_offset_t;
+ copy : boolean_t;
+ cur_protection : vm_prot_t;
+ max_protection : vm_prot_t;
+ inheritance : vm_inherit_t);
+#endif /* EMULATOR */
+
+/*
+ * Indicate that a range of the specified memory object cannot
+ * be provided at this time. [Threads waiting for memory pages
+ * specified by this call will experience a memory exception.
+ * Only threads waiting at the time of the call are affected.]
+ */
+simpleroutine memory_object_data_error(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ size : vm_size_t;
+ error_value : kern_return_t);
+
+/*
+ * Make decisions regarding the use of the specified
+ * memory object.
+ */
+simpleroutine memory_object_set_attributes(
+ memory_control : memory_object_control_t;
+ object_ready : boolean_t;
+ may_cache : boolean_t;
+ copy_strategy : memory_object_copy_strategy_t);
+
+/*
+ */
+simpleroutine memory_object_destroy(
+ memory_control : memory_object_control_t;
+ reason : kern_return_t);
+
+/*
+ * Provide the data contents of a range of the given memory
+ * object, with the access restriction specified, optional
+ * precious attribute, and reply message. [Only
+ * whole virtual pages of data can be accepted; partial pages
+ * will be discarded. Data should be provided on request, but
+ * may be provided in advance as desired. When data already
+ * held by this kernel is provided again, the new data is ignored.
+ * The access restriction is the subset of {read, write, execute}
+ * which are prohibited. The kernel may not provide any data (or
+ * protection) consistency among pages with different virtual page
+ * alignments within the same object. The precious value controls
+ * how the kernel treats the data. If it is FALSE, the kernel treats
+ * its copy as a temporary and may throw it away if it hasn't been
+ * changed. If the precious value is TRUE, the kernel treats its
+ * copy as a data repository and promises to return it to the manager;
+ * the manager may tell the kernel to throw it away instead by flushing
+ * and not cleaning the data -- see memory_object_lock_request. The
+ * reply_to port is for a completion message; it will be
+ * memory_object_supply_completed.]
+ */
+
+simpleroutine memory_object_data_supply(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ data : pointer_t, Dealloc[];
+ lock_value : vm_prot_t;
+ precious : boolean_t;
+ reply_to : mach_port_t =
+ MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+
+simpleroutine memory_object_ready(
+ memory_control : memory_object_control_t;
+ may_cache : boolean_t;
+ copy_strategy : memory_object_copy_strategy_t);
+
+simpleroutine memory_object_change_attributes(
+ memory_control : memory_object_control_t;
+ may_cache : boolean_t;
+ copy_strategy : memory_object_copy_strategy_t;
+ reply_to : mach_port_t =
+ MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+
+skip; /* old host_callout_statistics_reset */
+skip; /* old port_set_select */
+skip; /* old port_set_backup */
+
+/*
+ * Set/Get special properties of memory associated
+ * to some virtual address range, such as cachability,
+ * migrability, replicability. Machine-dependent.
+ */
+routine vm_machine_attribute(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ attribute : vm_machine_attribute_t;
+ inout value : vm_machine_attribute_val_t);
+
+/*skip;*/ /* old host_fpa_counters_reset */
+
+/*
+ * This routine is created for allocating DMA buffers.
+ * We are going to get a contiguous physical memory
+ * and its physical address in addition to the virtual address.
+ */
+routine vm_dma_buff_alloc(
+ host_priv : host_priv_t;
+ target_task : vm_task_t;
+ size : vm_size_t;
+ out vaddr : vm_address_t;
+ out paddr : vm_address_t);
+
+/*
+ * There is no more room in this interface for additional calls.
+ */
diff --git a/pcnet32/main.c b/pcnet32/main.c
new file mode 100644
index 000000000..898da8cc5
--- /dev/null
+++ b/pcnet32/main.c
@@ -0,0 +1,236 @@
+#include <error.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <pciaccess.h>
+
+#include <cthreads.h>
+#include <mach.h>
+#include <hurd/trivfs.h>
+#include <hurd/ports.h>
+
+#include "device_U.h"
+#include "irq.h"
+#include "netdevice.h"
+
+struct port_bucket *port_bucket;
+struct port_bucket *irq_port_bucket;
+struct port_class *dev_class;
+struct port_class *intr_class;
+
+/* Trivfs hooks. */
+int trivfs_fstype = FSTYPE_MISC;
+int trivfs_fsid = 0;
+int trivfs_support_read = 0;
+int trivfs_support_write = 0;
+int trivfs_support_exec = 0;
+int trivfs_allow_open = O_READ | O_WRITE;
+
+struct port_class *trivfs_protid_portclasses[1];
+struct port_class *trivfs_cntl_portclasses[1];
+int trivfs_protid_nportclasses = 1;
+int trivfs_cntl_nportclasses = 1;
+
+/* Implementation of the Mach notify interface.  Only the no-senders
+   notification is serviced (it drives port cleanup via libports); every
+   other notification type is refused with EOPNOTSUPP.  */
+
+/* A port we held a right for was deleted.  Not used by this server.  */
+kern_return_t
+do_mach_notify_port_deleted (mach_port_t notify,
+			     mach_port_t name)
+{
+  return EOPNOTSUPP;
+}
+
+/* A message we sent with a msg-accepted request was accepted.  Unused.  */
+kern_return_t
+do_mach_notify_msg_accepted (mach_port_t notify,
+			     mach_port_t name)
+{
+  return EOPNOTSUPP;
+}
+
+/* A receive right we held was destroyed.  Unused.  */
+kern_return_t
+do_mach_notify_port_destroyed (mach_port_t notify,
+			       mach_port_t port)
+{
+  return EOPNOTSUPP;
+}
+
+/* All send rights to one of our ports are gone; hand off to libports so
+   it can reclaim the port.  */
+kern_return_t
+do_mach_notify_no_senders (mach_port_t notify,
+			   mach_port_mscount_t mscount)
+{
+  return ports_do_mach_notify_no_senders (notify, mscount);
+}
+
+/* A send-once right we held died unused.  Unused here.  */
+kern_return_t
+do_mach_notify_send_once (mach_port_t notify)
+{
+  return EOPNOTSUPP;
+}
+
+/* A port we named became a dead name.  Unused.  */
+kern_return_t
+do_mach_notify_dead_name (mach_port_t notify,
+			  mach_port_t name)
+{
+  return EOPNOTSUPP;
+}
+
+
+/* Trivfs callback to adjust the stat returned for our node.  This
+   translator reports the underlying node's stat unmodified.  */
+void
+trivfs_modify_stat (struct trivfs_protid *cred, io_statbuf_t *stat)
+{
+}
+
+/* Demuxer for kernel interrupt notifications.  Returns 1 (message
+   handled) for MACH_NOTIFY_IRQ messages, dispatching to the pcnet32
+   interrupt handler when the irq matches our device; returns 0 for
+   anything else so other demuxers can try.  No reply is generated
+   (see the commented-out MIG_NO_REPLY line).  */
+int notify_irq_server (mach_msg_header_t *inp, mach_msg_header_t *outp)
+{
+  extern void pcnet32_interrupt(int irq);
+  extern struct device *ether_dev;
+  mach_irq_notification_t *irq_header = (mach_irq_notification_t *) inp;
+
+  if (inp->msgh_id != MACH_NOTIFY_IRQ)
+    return 0;
+
+  if (irq_header->irq == ether_dev->irq)
+    pcnet32_interrupt (irq_header->irq);
+// ((mig_reply_header_t *) outp)->RetCode = MIG_NO_REPLY;
+  return 1;
+}
+
+/* Privileged ports obtained from get_privileged_ports () in main ().  */
+mach_port_t master_device;	/* kernel master device port */
+mach_port_t priv_host;		/* privileged host port */
+
+/* SIGINT handler: passing MACH_PORT_NULL to device_intr_notify appears
+   to deregister our interrupt notification before exiting.
+   NOTE(review): the irq line is hard-coded as 2 here rather than taken
+   from ether_dev->irq — confirm this matches the probed device.  */
+void int_handler (int sig)
+{
+  error_t err = device_intr_notify (master_device, 2, 0, MACH_PORT_NULL,
+				    MACH_MSG_TYPE_MAKE_SEND);
+  if (err)
+    error (2, err, "device_intr_notify");
+
+  exit (0);
+}
+
+/* Main demuxer for the port bucket: tries the device RPC server, then
+   the notify server, then trivfs, then the irq notification handler,
+   in that order, until one claims the message.  */
+static int
+demuxer (mach_msg_header_t *inp, mach_msg_header_t *outp)
+{
+  extern int device_server (mach_msg_header_t *, mach_msg_header_t *);
+  extern int notify_server (mach_msg_header_t *, mach_msg_header_t *);
+  return device_server (inp, outp) || notify_server (inp, outp)
+    || trivfs_demuxer (inp, outp) || notify_irq_server (inp, outp);
+}
+
+/* Return TRUE iff PORT is one of our trivfs protid ports, i.e. a port
+   obtained by opening this translator's node.  The looked-up port info
+   is dereferenced immediately; only existence is checked.  */
+boolean_t
+is_master_device (mach_port_t port)
+{
+  struct port_info *pi = ports_lookup_port (port_bucket, port,
+					    trivfs_protid_portclasses[0]);
+  if (pi == NULL)
+    return FALSE;
+
+  ports_port_deref (pi);
+  return TRUE;
+}
+
+/* Trivfs shutdown hook.  Refuses with EBUSY while protid ports remain
+   open (unless FSYS_GOAWAY_FORCE), otherwise releases the master device
+   port, tears down libpciaccess state and exits.
+   NOTE(review): on the exit path control never returns; exit (0) does
+   not return, though some compilers warn about the missing return.  */
+error_t
+trivfs_goaway (struct trivfs_control *fsys, int flags)
+{
+  int count;
+
+  /* Stop new requests. */
+  ports_inhibit_class_rpcs (trivfs_cntl_portclasses[0]);
+  ports_inhibit_class_rpcs (trivfs_protid_portclasses[0]);
+
+  count = ports_count_class (trivfs_protid_portclasses[0]);
+
+  if (count && !(flags & FSYS_GOAWAY_FORCE))
+    {
+      /* We won't go away, so start things going again... */
+      ports_enable_class (trivfs_protid_portclasses[0]);
+      ports_resume_class_rpcs (trivfs_cntl_portclasses[0]);
+      ports_resume_class_rpcs (trivfs_protid_portclasses[0]);
+      return EBUSY;
+    }
+
+  mach_port_deallocate (mach_task_self (), master_device);
+  pci_system_cleanup ();
+  exit (0);
+}
+
+/* Clean routine for dev_class ports: when a device port dies, drop the
+   interrupt notification registration (same call as int_handler).
+   NOTE(review): PORT is ignored and the irq line is hard-coded as 2 —
+   confirm against ether_dev->irq.  */
+void dev_clean_routine (void *port)
+{
+  error_t err = device_intr_notify (master_device, 2, 0, MACH_PORT_NULL,
+				    MACH_MSG_TYPE_MAKE_SEND);
+  if (err)
+    error (2, err, "device_intr_notify");
+}
+
+/* Dedicated thread servicing the irq port bucket, so interrupt
+   notifications are handled independently of ordinary device RPCs.
+   Uses a GNU C nested function as the demuxer.  */
+any_t
+irq_receive_thread(any_t unused)
+{
+  /* Only irq notifications arrive on this bucket.  */
+  int irq_demuxer (mach_msg_header_t *inp, mach_msg_header_t *outp)
+  {
+    return notify_irq_server (inp, outp);
+  }
+  ports_manage_port_operations_one_thread (irq_port_bucket, irq_demuxer, 0);
+  return 0;
+}
+
+/* Translator entry point: acquire privileged ports, probe the pcnet32
+   hardware via libpciaccess, initialize the Linux-emulation glue, start
+   the trivfs node, then serve RPCs until trivfs_goaway succeeds.  */
+int
+main ()
+{
+  extern void mach_device_init();
+  extern any_t io_done_thread(any_t unused);
+  extern boolean_t ioperm_ports ();
+  extern int pcnet32_probe(struct device *dev);
+  mach_port_t bootstrap;
+  error_t err;
+  struct trivfs_control *fsys;
+
+  task_get_bootstrap_port (mach_task_self (), &bootstrap);
+  if (bootstrap == MACH_PORT_NULL)
+    error (1, 0, "must be started as a translator");
+
+  /* Gain I/O port access for the NIC registers.
+     NOTE(review): the error message says "keyboard" — looks like a
+     copy-paste from another driver; should mention the network card.  */
+  if (!ioperm_ports ())
+    error (2, errno, "cannot set the port access permission for the keyboard");
+
+  /* Ctrl-C must deregister the interrupt notification before exit.  */
+  signal (SIGINT, int_handler);
+
+  err = get_privileged_ports (&priv_host, &master_device);
+  if (err)
+    error (1, err, "get_privileged_ports");
+
+  /* Initialize the port bucket and port classes. */
+  port_bucket = ports_create_bucket ();
+  irq_port_bucket = ports_create_bucket ();
+  dev_class = ports_create_class (dev_clean_routine, 0);
+  intr_class = ports_create_class (0, 0);
+  trivfs_cntl_portclasses[0] = ports_create_class (trivfs_clean_cntl, 0);
+  trivfs_protid_portclasses[0] = ports_create_class (trivfs_clean_protid, 0);
+
+  err = pci_system_init ();
+  if (err)
+    error (2, err, "pci_system_init");
+
+  /* Probe for the card; NULL means let the driver allocate the device.  */
+  err = pcnet32_probe (NULL);
+  if (err)
+    error (2, err, "pcnet32_probe");
+
+  mach_device_init ();
+  linux_net_emulation_init ();
+  linux_kmem_init ();
+
+  /* Reply to our parent. */
+  err = trivfs_startup (bootstrap, 0,
+			trivfs_cntl_portclasses[0], port_bucket,
+			trivfs_protid_portclasses[0], port_bucket, &fsys);
+  mach_port_deallocate (mach_task_self (), bootstrap);
+  if (err)
+    error (1, err, "Contacting parent");
+
+  /* Background threads: one completes deferred skb frees/replies, one
+     services interrupt notifications.  */
+  cthread_detach (cthread_fork (io_done_thread, 0));
+  cthread_detach (cthread_fork (irq_receive_thread, 0));
+
+  /* Launch.  Keep serving as long as trivfs_goaway returns EBUSY.  */
+  do
+    {
+      ports_manage_port_operations_one_thread (port_bucket, demuxer, 0);
+    } while (trivfs_goaway (fsys, 0));
+  return 0;
+}
+
diff --git a/pcnet32/net.c b/pcnet32/net.c
new file mode 100644
index 000000000..fe7cbfe66
--- /dev/null
+++ b/pcnet32/net.c
@@ -0,0 +1,834 @@
+/*
+ * Linux network driver support.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Ethernet-type device handling.
+ *
+ * Version: @(#)eth.c 1.0.7 05/25/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Fixes:
+ * Mr Linux : Arp problems
+ * Alan Cox : Generic queue tidyup (very tiny here)
+ * Alan Cox : eth_header ntohs should be htons
+ * Alan Cox : eth_rebuild_header missing an htons and
+ * minor other things.
+ * Tegge : Arp bug fixes.
+ * Florian : Removed many unnecessary functions, code cleanup
+ * and changes for new arp and skbuff.
+ * Alan Cox : Redid header building to reflect new format.
+ * Alan Cox : ARP only when compiled with CONFIG_INET
+ * Greg Page : 802.2 and SNAP stuff.
+ * Alan Cox : MAC layer pointers/new format.
+ * Paul Gortmaker : eth_copy_and_sum shouldn't csum padding.
+ * Alan Cox : Protect against forwarding explosions with
+ * older network drivers and IFF_ALLMULTI
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include <error.h>
+#include <arpa/inet.h>
+
+#include "mach_U.h"
+
+#include <mach.h>
+#include <hurd.h>
+
+#define MACH_INCLUDE
+
+#include "vm_param.h"
+#include "netdevice.h"
+#include "device_reply_U.h"
+#include "if_ether.h"
+#include "dev_hdr.h"
+#include "if.h"
+#include "util.h"
+
+#define ether_header ethhdr
+
+extern int linux_intr_pri;
+extern struct port_bucket *port_bucket;
+extern struct port_class *dev_class;
+
+/* One of these is associated with each instance of a device. */
+struct net_data
+{
+  struct port_info port;	/* device port */
+// struct ifnet ifnet;		/* Mach ifnet structure (needed for filters) */
+  struct emul_device device;	/* generic device structure */
+  mach_port_t delivery_port;	/* port received packets are sent to
+				   (presumably pfinet's — see netif_rx) */
+  struct linux_device *dev;	/* Linux network device structure */
+};
+
+/* List of sk_buffs waiting to be freed. */
+static struct sk_buff_head skb_done_list;
+
+/* Forward declarations. */
+
+extern struct device_emulation_ops linux_net_emulation_ops;
+
+static int print_packet_size = 0;
+
+/* Mach message type descriptor for the ethernet-header field of an
+   outgoing net_rcv_msg (copied into each message in netif_rx).  */
+mach_msg_type_t header_type =
+{
+  MACH_MSG_TYPE_BYTE,	/* name */
+  8,			/* size in bits */
+  NET_HDW_HDR_MAX,	/* number */
+  TRUE,			/* inline */
+  FALSE,		/* longform */
+  FALSE,		/* deallocate */
+  0			/* unused */
+};
+
+/* Descriptor for the packet body; the element count is left 0 here and
+   the trailing unused field is implicitly zero-initialized.  */
+mach_msg_type_t packet_type =
+{
+  MACH_MSG_TYPE_BYTE,	/* name */
+  8,			/* size */
+  0,			/* number */
+  TRUE,			/* inline */
+  FALSE,		/* longform */
+  FALSE			/* deallocate */
+};
+
+/* Linux kernel network support routines. */
+
+/* Requeue packet SKB for transmission after the interface DEV
+   has timed out.  The priority of the packet is PRI.
+   In Mach, we simply drop the packet like the native drivers:
+   no requeue is attempted, the buffer is freed immediately.  */
+void
+dev_queue_xmit (struct sk_buff *skb, struct linux_device *dev, int pri)
+{
+  dev_kfree_skb (skb, FREE_WRITE);
+}
+
+/* Close the device DEV.  A no-op stub: the device stays up for the
+   lifetime of the translator; always reports success.  */
+int
+dev_close (struct linux_device *dev)
+{
+  return 0;
+}
+
+/* Network software interrupt handler. */
+//void
+//net_bh (void)
+//{
+// int len;
+// struct sk_buff *skb;
+// struct linux_device *dev;
+//
+// /* Start transmission on interfaces. */
+// for (dev = dev_base; dev; dev = dev->next)
+// {
+// if (dev->base_addr && dev->base_addr != 0xffe0)
+// while (1)
+// {
+// skb = skb_dequeue (&dev->buffs[0]);
+// if (skb)
+// {
+// len = skb->len;
+// if ((*dev->hard_start_xmit) (skb, dev))
+// {
+// skb_queue_head (&dev->buffs[0], skb);
+// mark_bh (NET_BH);
+// break;
+// }
+// else if (print_packet_size)
+// printf ("net_bh: length %d\n", len);
+// }
+// else
+// break;
+// }
+// }
+//}
+
+/* Free all sk_buffs on the done list.
+   This routine is called by the iodone thread in ds_routines.c.
+   For each queued skb, a pending device_write reply (if any) is sent
+   first, then the buffer itself is released.  */
+void
+free_skbuffs ()
+{
+  struct sk_buff *skb;
+
+  while (1)
+    {
+      skb = skb_dequeue (&skb_done_list);
+      if (skb)
+	{
+//	  if (skb->copy)
+//	    {
+//	      vm_map_copy_discard (skb->copy);
+//	      skb->copy = NULL;
+//	    }
+	  if (MACH_PORT_VALID (skb->reply))
+	    {
+	      /* Complete the write RPC with success and the byte count;
+		 clearing reply lets dev_kfree_skb free it directly.  */
+	      ds_device_write_reply (skb->reply, skb->reply_type, 0, skb->len);
+	      skb->reply = MACH_PORT_NULL;
+	    }
+	  dev_kfree_skb (skb, FREE_WRITE);
+	}
+      else
+	break;
+    }
+}
+
+/* Allocate an sk_buff with SIZE bytes of data space.
+   Linux-compat wrapper: PRIORITY is ignored; delegates to
+   dev_alloc_skb.  */
+struct sk_buff *
+alloc_skb (unsigned int size, int priority)
+{
+  return dev_alloc_skb (size);
+}
+
+/* Free SKB.  Linux-compat wrapper around dev_kfree_skb; PRIORITY is
+   forwarded as the free mode.  */
+void
+kfree_skb (struct sk_buff *skb, int priority)
+{
+  dev_kfree_skb (skb, priority);
+}
+
+/* Allocate an sk_buff with SIZE bytes of data space.  The data area is
+   rounded up to a 16-byte multiple and the struct sk_buff header is
+   prepended; allocations larger than one page are refused (NULL).
+   Returns NULL on allocation failure as well.  */
+struct sk_buff *
+dev_alloc_skb (unsigned int size)
+{
+  struct sk_buff *skb;
+  int len = size;
+
+  size = (size + 15) & ~15;
+  size += sizeof (struct sk_buff);
+
+  // TODO the max packet size is 1 page,
+  // so I don't need to record the size that will be used in deallocation.
+  if (size > PAGE_SIZE)
+    {
+      fprintf (stderr,
+	       "WARNING! fail to allocate a packet of %d bytes\n", size);
+      return NULL;
+    }
+
+  /* XXX: In Mach, a sk_buff is located at the head,
+     while it's located at the tail in Linux. */
+  skb = (struct sk_buff *) linux_kmalloc (size, 0);
+  if (skb == NULL)
+    {
+      debug ("fails to allocate memory for the packet.");
+      return NULL;
+    }
+
+  /* Fresh buffer: empty data area directly after the header.
+     Note `end' uses the caller's unrounded LEN — the 16-byte rounding
+     only pads the underlying allocation.  */
+  skb->dev = NULL;
+  skb->reply = MACH_PORT_NULL;
+//  skb->copy = NULL;
+  skb->len = 0;
+  skb->prev = skb->next = NULL;
+  skb->list = NULL;
+  skb->data = ((unsigned char *) skb) + sizeof (struct sk_buff);
+  skb->tail = skb->data;
+  skb->head = skb->data;
+  skb->end = skb->data + len;
+
+  return skb;
+}
+
+/* Free the sk_buff SKB.  If a write reply is still owed, the skb is
+   deferred to skb_done_list for the iodone thread (free_skbuffs) to
+   reply and free; otherwise it is released immediately.  MODE is
+   unused.  */
+void
+dev_kfree_skb (struct sk_buff *skb, int mode)
+{
+//  unsigned flags;
+  extern void wakeup_io_done_thread ();
+
+  /* Queue sk_buff on done list if there is a
+     page list attached or we need to send a reply.
+     Wakeup the iodone thread to process the list. */
+  if (/*skb->copy ||*/ MACH_PORT_VALID (skb->reply))
+    {
+      skb_queue_tail (&skb_done_list, skb);
+//      save_flags (flags);
+      wakeup_io_done_thread ();
+//      restore_flags (flags);
+      return;
+    }
+  linux_kfree (skb);
+}
+/*
+ * Deliver a received-packet message to the pfinet server listening on
+ * DEST (the device's delivery port).  The send uses a zero timeout, so
+ * the packet is dropped rather than blocking if DEST's queue is full.
+ * NOTE(review): on failure the caller's send right to DEST is
+ * deallocated here — confirm callers expect to lose the right.
+ */
+int
+deliver_msg(mach_port_t dest, struct net_rcv_msg *msg)
+{
+  mach_msg_return_t err;
+
+  msg->msg_hdr.msgh_bits = MACH_MSGH_BITS (MACH_MSG_TYPE_COPY_SEND, 0);
+  /* remember message sizes must be rounded up */
+  msg->msg_hdr.msgh_local_port = MACH_PORT_NULL;
+  msg->msg_hdr.msgh_kind = MACH_MSGH_KIND_NORMAL;
+  msg->msg_hdr.msgh_id = NET_RCV_MSG_ID;
+
+  msg->msg_hdr.msgh_remote_port = dest;
+  err = mach_msg ((mach_msg_header_t *)msg,
+		  MACH_SEND_MSG|MACH_SEND_TIMEOUT,
+		  msg->msg_hdr.msgh_size, 0, MACH_PORT_NULL,
+		  0, MACH_PORT_NULL);
+  if (err != MACH_MSG_SUCCESS)
+    {
+      mach_port_deallocate(mach_task_self (),
+			   ((mach_msg_header_t *)msg)->msgh_remote_port);
+//      error (0, err, "mach_msg");
+      return err;
+    }
+
+  return MACH_MSG_SUCCESS;
+}
+
+/* Accept packet SKB received on an interface.  Builds a net_rcv_msg —
+   ethernet header in `header', a packet_header followed by the payload
+   in `packet' — and hands it to the device's delivery port.  The skb is
+   always consumed, even when allocation of the message fails.
+   NOTE(review): skb->dev is dereferenced in the initializer before the
+   assert below, so a NULL skb would fault before the assert fires.  */
+void
+netif_rx (struct sk_buff *skb)
+{
+  int pack_size;
+  net_rcv_msg_t net_msg;
+  struct ether_header *eh;
+  struct packet_header *ph;
+  struct linux_device *dev = skb->dev;
+
+  assert (skb != NULL);
+
+  if (print_packet_size)
+    printf ("netif_rx: length %ld\n", skb->len);
+
+  /* Allocate a kernel message buffer. */
+  net_msg = malloc (sizeof (*net_msg));
+  if (!net_msg)
+    {
+      dev_kfree_skb (skb, FREE_READ);
+      return;
+    }
+
+  /* Payload size excludes the ethernet header.  */
+  pack_size = skb->len - sizeof (struct ethhdr);
+  /* remember message sizes must be rounded up */
+  net_msg->msg_hdr.msgh_size = (((mach_msg_size_t) (sizeof(struct net_rcv_msg)
+						    - NET_RCV_MAX + pack_size)) + 3) & ~3;
+
+  /* Copy packet into message buffer. */
+  eh = (struct ether_header *) (net_msg->header);
+  ph = (struct packet_header *) (net_msg->packet);
+  memcpy (eh, skb->data, sizeof (struct ether_header));
+  /* packet is prefixed with a struct packet_header,
+     see include/device/net_status.h. */
+  memcpy (ph + 1, skb->data + sizeof (struct ether_header), pack_size);
+  ph->type = eh->h_proto;
+  ph->length = pack_size + sizeof (struct packet_header);
+
+  /* The packet data has been copied out; release the skb now.  */
+  dev_kfree_skb (skb, FREE_READ);
+
+  net_msg->sent = FALSE; /* Mark packet as received. */
+
+  net_msg->header_type = header_type;
+  net_msg->packet_type = packet_type;
+  net_msg->net_rcv_msg_packet_count = ph->length;
+  deliver_msg (dev->net_data->delivery_port, net_msg);
+  free (net_msg);
+}
+
+/* Mach device interface routines. */
+
+/* Return a send right associated with network device ND, or
+   MACH_PORT_NULL when ND is NULL (e.g. after a failed open).  */
+static mach_port_t
+dev_to_port (void *nd)
+{
+  return (nd
+	  ? ports_get_send_right (nd)
+	  : MACH_PORT_NULL);
+}
+
+
+/*
+ * Initialize send and receive queues on an interface.
+ */
+//void if_init_queues(ifp)
+// register struct ifnet *ifp;
+//{
+// IFQ_INIT(&ifp->if_snd);
+// queue_init(&ifp->if_rcv_port_list);
+// queue_init(&ifp->if_snd_port_list);
+// simple_lock_init(&ifp->if_rcv_port_list_lock);
+// simple_lock_init(&ifp->if_snd_port_list_lock);
+//}
+
+
+/* Open the network device named NAME.
+
+   First open: allocate per-device state, call the driver's open hook,
+   mark the interface up, and answer asynchronously through REPLY_PORT
+   (returning MIG_NO_REPLY so MIG sends nothing itself).  Subsequent
+   opens take the fast path at the bottom and return the existing port
+   synchronously through *DEVP.
+
+   NOTE(review): the `out:' label and the whole reply sequence live
+   INSIDE the `if (!nd)' block — the closing brace before the *devp
+   assignment ends that block, not the function.  */
+static io_return_t
+device_open (mach_port_t reply_port, mach_msg_type_name_t reply_port_type,
+	     dev_mode_t mode, char *name, device_t *devp)
+{
+  io_return_t err = D_SUCCESS;
+//  struct ifnet *ifp;
+  struct linux_device *dev;
+  struct net_data *nd;
+
+  /* Search for the device. */
+  for (dev = dev_base; dev; dev = dev->next)
+    {
+      if (dev->base_addr
+	  && dev->base_addr != 0xffe0
+	  && !strcmp (name, dev->name))
+	break;
+    }
+  if (!dev)
+    return D_NO_SUCH_DEVICE;
+
+  /* Allocate and initialize device data if this is the first open. */
+  nd = dev->net_data;
+  if (!nd)
+    {
+      err = ports_create_port (dev_class, port_bucket,
+			       sizeof (*nd), &nd);
+      if (err)
+	goto out;
+
+      dev->net_data = nd;
+      nd->dev = dev;
+      nd->device.emul_data = nd;
+      nd->device.emul_ops = &linux_net_emulation_ops;
+//      ipc_kobject_set (nd->port, (ipc_kobject_t) & nd->device, IKOT_DEVICE);
+//      notify = ipc_port_make_sonce (nd->port);
+//      ip_lock (nd->port);
+//      ipc_port_nsrequest (nd->port, 1, notify, &notify);
+//      assert (notify == IP_NULL);
+
+//      ifp = &nd->ifnet;
+//      ifp->if_unit = dev->name[strlen (dev->name) - 1] - '0';
+//      ifp->if_flags = IFF_UP | IFF_RUNNING;
+//      ifp->if_mtu = dev->mtu;
+//      ifp->if_header_size = dev->hard_header_len;
+//      ifp->if_header_format = dev->type;
+//      ifp->if_address_size = dev->addr_len;
+//      ifp->if_address = dev->dev_addr;
+//      if_init_queues (ifp);
+
+      /* Let the driver bring the hardware up.  */
+      if (dev->open)
+	{
+//	  linux_intr_pri = SPL6;
+	  if ((*dev->open) (dev))
+	    err = D_NO_SUCH_DEVICE;
+	}
+
+    out:
+      /* On failure, tear down the half-initialized state so the next
+	 open retries from scratch.  */
+      if (err)
+	{
+	  if (nd)
+	    {
+	      ports_destroy_right (nd);
+	      nd = NULL;
+	      dev->net_data = NULL;
+	    }
+	}
+      else
+	{
+	  /* IPv6 heavily relies on multicasting (especially router and
+	     neighbor solicits and advertisements), so enable reception of
+	     those multicast packets by setting `LINUX_IFF_ALLMULTI'. */
+	  dev->flags |= LINUX_IFF_UP | LINUX_IFF_RUNNING | LINUX_IFF_ALLMULTI;
+	  skb_queue_head_init (&dev->buffs[0]);
+
+	  if (dev->set_multicast_list)
+	    dev->set_multicast_list (dev);
+	}
+      /* Reply ourselves (asynchronously) with the result and the new
+	 device port; MIG must not send its own reply.  */
+      if (MACH_PORT_VALID (reply_port))
+	ds_device_open_reply (reply_port, reply_port_type,
+			      err, dev_to_port (nd));
+      return MIG_NO_REPLY;
+  }
+
+  /* Device already open: hand back the existing right synchronously.  */
+  *devp = ports_get_right (nd);
+  return D_SUCCESS;
+}
+
+/* Mach device_write RPC: transmit the COUNT-byte packet DATA on the
+   network device D (a struct net_data).  The data is copied into a
+   freshly allocated sk_buff and either handed to the driver at once or
+   queued on buffs[0] when the driver is busy or a packet is already
+   queued.  REPLY_PORT is stashed in the sk_buff, so the write reply is
+   sent later from the transmit-completion path (hence MIG_NO_REPLY
+   here); BYTES_WRITTEN is not filled in on this path.  BN (record
+   number) and MODE are unused for network devices.  */
+static io_return_t
+device_write (void *d, mach_port_t reply_port,
+	      mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+	      recnum_t bn, io_buf_ptr_t data, unsigned int count,
+	      int *bytes_written)
+{
+//  unsigned char *p;
+  int amt, skblen;
+//  io_return_t err = 0;
+  struct net_data *nd = d;
+  struct linux_device *dev = nd->dev;
+  struct sk_buff *skb;
+
+  /* Reject empty packets and packets larger than one MTU + header.  */
+  if (count == 0 || count > dev->mtu + dev->hard_header_len)
+    return D_INVALID_SIZE;
+
+  /* Allocate a sk_buff. */
+//  amt = PAGE_SIZE - (copy->offset & PAGE_MASK);
+//  TODO need for test.
+  /* AMT is a leftover of the commented-out page-list copy below; it is
+     computed here but never used.  */
+  amt = PAGE_SIZE - (((int) data) & PAGE_MASK);
+  skblen = count;
+  skb = dev_alloc_skb (skblen);
+  if (!skb)
+    return D_NO_MEMORY;
+
+  /* Copy user data. This is only required if it spans multiple pages. */
+  {
+    skb->len = skblen;
+    skb->tail = skb->data + skblen;
+    skb->end = skb->tail;
+
+    memcpy (skb->data, data, count);
+//    memcpy (skb->data,
+//	    ((void *) copy->cpy_page_list[0]->phys_addr
+//	     + (copy->offset & PAGE_MASK)),
+//	    amt);
+//    count -= amt;
+//    p = skb->data + amt;
+//    for (i = 1; count > 0 && i < copy->cpy_npages; i++)
+//      {
+//	amt = PAGE_SIZE;
+//	if (amt > count)
+//	  amt = count;
+//	memcpy (p, (void *) copy->cpy_page_list[i]->phys_addr, amt);
+//	count -= amt;
+//	p += amt;
+//      }
+//
+//    assert (count == 0);
+
+//    vm_map_copy_discard (copy);
+    /* Release the caller's out-of-line buffer now that it is copied.  */
+    vm_deallocate (mach_task_self (), (vm_address_t) data, count);
+  }
+
+  /* Remember where the delayed write reply must go.  */
+  skb->dev = dev;
+  skb->reply = reply_port;
+  skb->reply_type = reply_port_type;
+
+  /* Queue packet for transmission and schedule a software interrupt. */
+  // TODO should I give any protection here?
+//  s = splimp ();
+  /* Queue the skb if something is already waiting on buffs[0] (keeps
+     ordering) or if the driver refuses it (hard_start_xmit != 0).  */
+  if (dev->buffs[0].next != (struct sk_buff *) &dev->buffs[0]
+      || (*dev->hard_start_xmit) (skb, dev))
+    {
+      __skb_queue_tail (&dev->buffs[0], skb);
+//      mark_bh (NET_BH);
+    }
+//  splx (s);
+
+  /* Send packet to filters. */
+  // TODO should I deliver the packet to other network stacks?
+//  {
+//    struct packet_header *packet;
+//    struct ether_header *header;
+//    ipc_kmsg_t kmsg;
+//
+//    kmsg = net_kmsg_get ();
+//
+//    if (kmsg != IKM_NULL)
+//      {
+//	/* Suitable for Ethernet only.  */
+//	header = (struct ether_header *) (net_kmsg (kmsg)->header);
+//	packet = (struct packet_header *) (net_kmsg (kmsg)->packet);
+//	memcpy (header, skb->data, sizeof (struct ether_header));
+//
+//	/* packet is prefixed with a struct packet_header,
+//	   see include/device/net_status.h.  */
+//	memcpy (packet + 1, skb->data + sizeof (struct ether_header),
+//		skb->len - sizeof (struct ether_header));
+//	packet->length = skb->len - sizeof (struct ether_header)
+//	  + sizeof (struct packet_header);
+//	packet->type = header->ether_type;
+//	net_kmsg (kmsg)->sent = TRUE; /* Mark packet as sent.  */
+//	s = splimp ();
+//	net_packet (&dev->net_data->ifnet, kmsg, packet->length,
+//		    ethernet_priority (kmsg));
+//	splx (s);
+//      }
+//  }
+
+  /* The reply is sent from the completion path; tell MIG to stay quiet.  */
+  return MIG_NO_REPLY;
+}
+
+/*
+ * Other network operations
+ */
+/* Handle the common read-only status flavors for DEV.
+   NET_STATUS fills a struct net_status with Ethernet-generic values;
+   NET_ADDRESS returns the hardware address, zero-padded to a whole
+   number of ints and converted word by word to network byte order.
+   *COUNT is the size of STATUS in ints on entry and the number of ints
+   actually used on return.  */
+io_return_t
+net_getstat(dev, flavor, status, count)
+	struct linux_device	*dev;
+	dev_flavor_t		flavor;
+	dev_status_t		status;		/* pointer to OUT array */
+	natural_t		*count;		/* OUT */
+{
+#define ETHERMTU 1500
+  switch (flavor) {
+    case NET_STATUS:
+    {
+      register struct net_status *ns = (struct net_status *)status;
+
+      if (*count < NET_STATUS_COUNT)
+	return (D_INVALID_OPERATION);
+
+      /* 60 = minimum Ethernet frame length without FCS.  */
+      ns->min_packet_size = 60;
+      ns->max_packet_size = ETH_HLEN + ETHERMTU;
+      ns->header_format   = HDR_ETHERNET;
+      ns->header_size     = ETH_HLEN;
+      ns->address_size    = ETH_ALEN;
+      ns->flags	      = 0;
+      ns->mapped_size     = 0;
+
+      *count = NET_STATUS_COUNT;
+      break;
+    }
+    case NET_ADDRESS:
+    {
+      register int addr_byte_count;
+      register int addr_int_count;
+      register int i;
+
+      /* Round the 6-byte MAC address up to whole ints.  */
+      addr_byte_count = ETH_ALEN;
+      addr_int_count = (addr_byte_count + (sizeof(int)-1))
+	/ sizeof(int);
+
+      if (*count < addr_int_count)
+	{
+	  /* XXX debug hack. */
+	  printf ("net_getstat: count: %d, addr_int_count: %d\n",
+		  *count, addr_int_count);
+	  return (D_INVALID_OPERATION);
+	}
+
+      memcpy(status, dev->dev_addr, addr_byte_count);
+      /* Zero the padding bytes after the address.  */
+      if (addr_byte_count < addr_int_count * sizeof(int))
+	memset((char *)status + addr_byte_count, 0,
+	       (addr_int_count * sizeof(int)
+		- addr_byte_count));
+
+      /* Convert each word to network byte order, as the Mach
+	 NET_ADDRESS flavor expects.  */
+      for (i = 0; i < addr_int_count; i++) {
+	register int word;
+
+	word = status[i];
+	status[i] = htonl(word);
+      }
+      *count = addr_int_count;
+      break;
+    }
+    default:
+      return (D_INVALID_OPERATION);
+  }
+  return (D_SUCCESS);
+}
+
+/* Mach device_get_status RPC.  NET_FLAGS returns the Linux interface
+   flags as a short; every other flavor is delegated to net_getstat.
+   (Wireless-extension get-ioctls are commented out below.)  */
+static io_return_t
+device_get_status (void *d, dev_flavor_t flavor, dev_status_t status,
+		   mach_msg_type_number_t *count)
+{
+  struct net_data *net = (struct net_data *) d;
+
+  if (flavor == NET_FLAGS)
+    {
+      if (*count != sizeof(short))
+	return D_INVALID_SIZE;
+
+      *(short *) status = net->dev->flags;
+      return D_SUCCESS;
+    }
+
+//  if(flavor >= SIOCIWFIRST && flavor <= SIOCIWLAST)
+//    {
+//      /* handle wireless ioctl */
+//      if(! IW_IS_GET(flavor))
+//	return D_INVALID_OPERATION;
+//
+//      if(*count * sizeof(int) < sizeof(struct ifreq))
+//	return D_INVALID_OPERATION;
+//
+//      struct net_data *nd = d;
+//      struct linux_device *dev = nd->dev;
+//
+//      if(! dev->do_ioctl)
+//	return D_INVALID_OPERATION;
+//
+//      int result;
+//
+//      if (flavor == SIOCGIWRANGE || flavor == SIOCGIWENCODE
+//	  || flavor == SIOCGIWESSID || flavor == SIOCGIWNICKN
+//	  || flavor == SIOCGIWSPY)
+//	{
+//	  /*
+//	   * These ioctls require an `iw_point' as their argument (i.e.
+//	   * they want to return some data to userspace.
+//	   * Therefore supply some sane values and carry the data back
+//	   * to userspace right behind the `struct iwreq'.
+//	   */
+//	  struct iw_point *iwp = &((struct iwreq *) status)->u.data;
+//	  iwp->length = *count * sizeof (dev_status_t) - sizeof (struct ifreq);
+//	  iwp->pointer = (void *) status + sizeof (struct ifreq);
+//
+//	  result = dev->do_ioctl (dev, (struct ifreq *) status, flavor);
+//
+//	  *count = ((sizeof (struct ifreq) + iwp->length)
+//		    / sizeof (dev_status_t));
+//	  if (iwp->length % sizeof (dev_status_t))
+//	    (*count) ++;
+//	}
+//      else
+//	{
+//	  *count = sizeof(struct ifreq) / sizeof(int);
+//	  result = dev->do_ioctl(dev, (struct ifreq *) status, flavor);
+//	}
+//
+//      return result ? D_IO_ERROR : D_SUCCESS;
+//    }
+//  else
+  {
+    /* common get_status request */
+    return net_getstat (net->dev, flavor, status, count);
+  }
+}
+
+/*
+ * Change the flags of device DEV to FLAGS.
+ */
+/* Only the whitelisted bits below are taken from FLAGS; SOFTHEADERS and
+   UP are preserved from the current device state.  Returns 0 always.  */
+int dev_change_flags (struct linux_device *dev, short flags)
+{
+  /* Promiscuous mode is always refused here: the securelevel test this
+     depended on is commented out, so the bit is stripped unconditionally.  */
+// if (securelevel > 0)
+  flags &= ~LINUX_IFF_PROMISC;
+
+  /*
+   *	Set the flags on our device.
+   */
+
+  dev->flags = (flags &
+		(LINUX_IFF_BROADCAST | LINUX_IFF_DEBUG | LINUX_IFF_LOOPBACK |
+		 LINUX_IFF_POINTOPOINT | LINUX_IFF_NOTRAILERS | LINUX_IFF_RUNNING |
+		 LINUX_IFF_NOARP | LINUX_IFF_PROMISC | LINUX_IFF_ALLMULTI
+		 | LINUX_IFF_SLAVE | LINUX_IFF_MASTER | LINUX_IFF_MULTICAST))
+    | (dev->flags & (LINUX_IFF_SOFTHEADERS|LINUX_IFF_UP));
+
+  /* The flags are taken into account (multicast, promiscuous, ...)
+     in the set_multicast_list handler. */
+  if ((dev->flags & LINUX_IFF_UP) && dev->set_multicast_list != NULL)
+    dev->set_multicast_list (dev);
+
+  return 0;
+}
+
+
+/* Mach device_set_status RPC.  Only NET_FLAGS is supported: the new
+   short flag word is filtered and applied through dev_change_flags.
+   Everything else returns D_INVALID_OPERATION.  (Wireless-extension
+   set-ioctls are commented out below.)  */
+static io_return_t
+device_set_status(void *d, dev_flavor_t flavor, dev_status_t status,
+		  mach_msg_type_number_t count)
+{
+  if (flavor == NET_FLAGS)
+    {
+      if (count != sizeof(short))
+        return D_INVALID_SIZE;
+
+      short flags = *(short *) status;
+      struct net_data *net = (struct net_data *) d;
+
+      dev_change_flags (net->dev, flags);
+
+      /* Change the flags of the Mach device, too. */
+//      net->ifnet.if_flags = net->dev->flags;
+      return D_SUCCESS;
+    }
+  return D_INVALID_OPERATION;
+
+//  if(flavor < SIOCIWFIRST || flavor > SIOCIWLAST)
+//    return D_INVALID_OPERATION;
+//
+//  if(! IW_IS_SET(flavor))
+//    return D_INVALID_OPERATION;
+//
+//  if(count * sizeof(int) < sizeof(struct ifreq))
+//    return D_INVALID_OPERATION;
+//
+//  struct net_data *nd = d;
+//  struct linux_device *dev = nd->dev;
+//
+//  if(! dev->do_ioctl)
+//    return D_INVALID_OPERATION;
+//
+//  if((flavor == SIOCSIWENCODE || flavor == SIOCSIWESSID
+//      || flavor == SIOCSIWNICKN || flavor == SIOCSIWSPY)
+//     && ((struct iwreq *) status)->u.data.pointer)
+//    {
+//      struct iw_point *iwp = &((struct iwreq *) status)->u.data;
+//
+//      /* safety check whether the status array is long enough ... */
+//      if(count * sizeof(int) < sizeof(struct ifreq) + iwp->length)
+//	return D_INVALID_OPERATION;
+//
+//      /* make sure, iwp->pointer points to the correct address */
+//      if(iwp->pointer) iwp->pointer = (void *) status + sizeof(struct ifreq);
+//    }
+//
+//  int result = dev->do_ioctl(dev, (struct ifreq *) status, flavor);
+//  return result ? D_IO_ERROR : D_SUCCESS;
+}
+
+
+/* Mach device_set_filter RPC.  The BPF-style FILTER/PRIORITY arguments
+   are ignored (the real net_set_filter call is commented out): PORT is
+   simply recorded as the delivery port, so every received packet is
+   delivered, unfiltered, to the most recent caller.  */
+static io_return_t
+device_set_filter (void *d, mach_port_t port, int priority,
+		   filter_t * filter, unsigned filter_count)
+{
+  ((struct net_data *) d)->delivery_port = port;
+  return 0;
+//  return net_set_filter (&((struct net_data *) d)->ifnet,
+//			 port, priority, filter, filter_count);
+}
+
+/* Device-emulation operations table for this driver.  The slot order
+   must follow `struct device_emulation_ops' (see device_emul.h, not
+   shown here); only the operations this user-level driver implements
+   are non-NULL.  */
+struct device_emulation_ops linux_net_emulation_ops =
+{
+  NULL,
+  NULL,
+  dev_to_port,
+  device_open,
+  NULL,
+  device_write,
+  NULL,
+  NULL,
+  NULL,
+  device_set_status,
+  device_get_status,
+  device_set_filter,
+  NULL,
+  NULL,
+  NULL,
+  NULL
+};
+
+/* Do any initialization required for network devices. */
+void
+linux_net_emulation_init ()
+{
+  /* NOTE(review): skb_done_list is presumably the queue of transmitted
+     sk_buffs whose write replies are still pending -- confirm against
+     the rest of ds_routines.c.  */
+  skb_queue_head_init (&skb_done_list);
+}
diff --git a/pcnet32/net_init.c b/pcnet32/net_init.c
new file mode 100644
index 000000000..f375c2ab0
--- /dev/null
+++ b/pcnet32/net_init.c
@@ -0,0 +1,450 @@
+/* netdrv_init.c: Initialization for network devices. */
+/*
+ Written 1993,1994,1995 by Donald Becker.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov or
+ C/O Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This file contains the initialization for the "pl14+" style ethernet
+ drivers. It should eventually replace most of drivers/net/Space.c.
+ Its primary advantage is that it is able to allocate low-memory buffers.
+ A secondary advantage is that the dangerous NE*000 netcards can reserve
+ their I/O port region before the SCSI probes start.
+
+ Modifications/additions by Bjorn Ekwall <bj0rn@blox.se>:
+ ethdev_index[MAX_ETH_CARDS]
+ register_netdev() / unregister_netdev()
+
+ Modifications by Wolfgang Walter
+ Use dev_close cleanly so we always shut things down tidily.
+
+ Changed 29/10/95, Alan Cox to pass sockaddr's around for mac addresses.
+
+ 14/06/96 - Paul Gortmaker: Add generic eth_change_mtu() function.
+
+ August 12, 1996 - Lawrence V. Stefani: Added fddi_change_mtu() and
+ fddi_setup() functions.
+ Sept. 10, 1996 - Lawrence V. Stefani: Increased hard_header_len to
+ include 3 pad bytes.
+*/
+
+/* The network devices currently exist only in the socket namespace, so these
+ entries are unused. The only ones that make sense are
+ open start the ethercard
+ close stop the ethercard
+ ioctl To get statistics, perhaps set the interface port (AUI, BNC, etc.)
+ One can also imagine getting raw packets using
+ read & write
+ but this is probably better handled by a raw packet socket.
+
+ Given that almost all of these functions are handled in the current
+ socket-based scheme, putting ethercard devices in /dev/ seems pointless.
+
+ [Removed all support for /dev network devices. When someone adds
+ streams then by magic we get them, but otherwise they are un-needed
+ and a space waste]
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+
+#include "if.h"
+#include "if_arp.h"
+#include "if_ether.h"
+#include "netdevice.h"
+#include "util.h"
+
+/* The list of used and available "eth" slots (for "eth0", "eth1", etc.) */
+#define MAX_ETH_CARDS 16 /* same as the number of IRQs in irq2dev[] */
+static struct device *ethdev_index[MAX_ETH_CARDS];
+
+struct device *dev_base;
+
+struct port_class *intr_class;
+struct port_bucket *irq_port_bucket;
+
+
+/* Fill in the fields of the device structure with ethernet-generic values.
+
+ If no device structure is passed, a new one is constructed, complete with
+ a SIZEOF_PRIVATE private data area.
+
+ If an empty string area is passed as dev->name, or a new structure is made,
+ a new name string is constructed. The passed string area should be 8 bytes
+ long.
+ */
+
+struct device *
+init_etherdev(struct device *dev, int sizeof_priv)
+{
+	int new_device = 0;
+	int i;
+
+	/* Use an existing correctly named device in Space.c:dev_base. */
+	if (dev == NULL) {
+		/* Room for the device struct, an "ethN" name and the
+		   private area, plus padding for dword rounding.  */
+		int alloc_size = sizeof(struct device) + sizeof("eth%d ")
+			+ sizeof_priv + 3;
+		struct device *cur_dev;
+		char pname[8];		/* Putative name for the device.  */
+
+		for (i = 0; i < MAX_ETH_CARDS; ++i)
+			if (ethdev_index[i] == NULL) {
+				sprintf(pname, "eth%d", i);
+				for (cur_dev = dev_base; cur_dev; cur_dev = cur_dev->next)
+					if (strcmp(pname, cur_dev->name) == 0) {
+						/* Reuse the pre-registered slot;
+						   only (re)allocate priv.  */
+						dev = cur_dev;
+						dev->init = NULL;
+						sizeof_priv = (sizeof_priv + 3) & ~3;
+						dev->priv = sizeof_priv
+							  ? malloc(sizeof_priv)
+							  :	NULL;
+						if (dev->priv) memset(dev->priv, 0, sizeof_priv);
+						goto found;
+					}
+			}
+
+		alloc_size &= ~3;		/* Round to dword boundary. */
+
+		/* The device doubles as a libports object: allocate it as a
+		   port in INTR_CLASS, with the device struct body, private
+		   area and name laid out behind the port_info header.  */
+		ports_create_port (intr_class, irq_port_bucket,
+				   sizeof (struct port_info) + alloc_size, &dev);
+		memset(((struct port_info *) dev) + 1, 0, alloc_size);
+		if (sizeof_priv)
+			dev->priv = (void *) (dev + 1);
+		/* The name string lives just past the private area.  */
+		dev->name = sizeof_priv + (char *)(dev + 1);
+		new_device = 1;
+	}
+
+	found:						/* From the double loop above. */
+
+	/* An empty or blank name means "pick the next free ethN".  */
+	if (dev->name &&
+		((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
+		for (i = 0; i < MAX_ETH_CARDS; ++i)
+			if (ethdev_index[i] == NULL) {
+				sprintf(dev->name, "eth%d", i);
+				debug ("setup device %s", dev->name);
+				ethdev_index[i] = dev;
+				break;
+			}
+	}
+
+	ether_setup(dev); 	/* Hmmm, should this be called here? */
+
+	if (new_device) {
+		/* Append the device to the device queue.  (When the list was
+		   empty this briefly links DEV to itself; the final
+		   `dev->next = 0' below puts it right.)  */
+		struct device **old_devp = &dev_base;
+		if (*old_devp == NULL)
+		  *old_devp = dev;
+		while ((*old_devp)->next)
+			old_devp = & (*old_devp)->next;
+		(*old_devp)->next = dev;
+		dev->next = 0;
+	}
+	return dev;
+}
+
+
+//static int eth_mac_addr(struct device *dev, void *p)
+//{
+// struct sockaddr *addr=p;
+// if(dev->start)
+// return -EBUSY;
+// memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+// return 0;
+//}
+//
+//static int eth_change_mtu(struct device *dev, int new_mtu)
+//{
+// if ((new_mtu < 68) || (new_mtu > 1500))
+// return -EINVAL;
+// dev->mtu = new_mtu;
+// return 0;
+//}
+
+#ifdef CONFIG_FDDI
+
+/* Set DEV's MTU to NEW_MTU, constrained to the legal FDDI SNAP payload
+   range; returns -EINVAL when out of range, 0 on success.  */
+static int fddi_change_mtu(struct device *dev, int new_mtu)
+{
+	if ((new_mtu < FDDI_K_SNAP_HLEN) || (new_mtu > FDDI_K_SNAP_DLEN))
+		return(-EINVAL);
+	dev->mtu = new_mtu;
+	return(0);
+}
+
+#endif
+
+/* Fill DEV with Ethernet-generic defaults (type, header/address sizes,
+   MTU, flags) and register a boot-defined "ethN" name in ethdev_index.  */
+void ether_setup(struct device *dev)
+{
+	int i;
+	/* Fill in the fields of the device structure with ethernet-generic values.
+	   This should be in a common file instead of per-driver. */
+	for (i = 0; i < DEV_NUMBUFFS; i++)
+		skb_queue_head_init(&dev->buffs[i]);
+
+	/* register boot-defined "eth" devices */
+	if (dev->name && (strncmp(dev->name, "eth", 3) == 0)) {
+//		i = simple_strtoul(dev->name + 3, NULL, 0);
+		/* NOTE(review): I is taken from the name with no bounds
+		   check against MAX_ETH_CARDS; a name like "eth99" would
+		   index past ethdev_index[].  */
+		i = strtoul(dev->name + 3, NULL, 0);
+		if (ethdev_index[i] == NULL) {
+			ethdev_index[i] = dev;
+		}
+		else if (dev != ethdev_index[i]) {
+			/* Really shouldn't happen! */
+#ifdef MACH
+			panic("ether_setup: Ouch! Someone else took %s\n",
+				  dev->name);
+#else
+			printk("ether_setup: Ouch! Someone else took %s\n",
+				  dev->name);
+#endif
+		}
+	}
+
+#ifndef MACH
+	dev->change_mtu		= eth_change_mtu;
+	dev->hard_header	= eth_header;
+	dev->rebuild_header 	= eth_rebuild_header;
+	dev->set_mac_address 	= eth_mac_addr;
+	dev->header_cache_bind 	= eth_header_cache_bind;
+	dev->header_cache_update= eth_header_cache_update;
+#endif
+
+	dev->type		= ARPHRD_ETHER;
+	dev->hard_header_len 	= ETH_HLEN;
+	dev->mtu		= 1500; /* eth_mtu */
+	dev->addr_len		= ETH_ALEN;
+	dev->tx_queue_len	= 100;	/* Ethernet wants good queues */
+
+	memset(dev->broadcast,0xFF, ETH_ALEN);
+
+	/* New-style flags. */
+	dev->flags		= IFF_BROADCAST|IFF_MULTICAST;
+	dev->family		= AF_INET;
+	dev->pa_addr	= 0;
+	dev->pa_brdaddr	= 0;
+	dev->pa_mask	= 0;
+	dev->pa_alen	= 4;
+}
+
+#ifdef CONFIG_TR
+
+/* Fill DEV with Token-Ring generic defaults (IEEE 802 type, TR header
+   and address sizes, MTU, flags).  */
+void tr_setup(struct device *dev)
+{
+	int i;
+	/* Fill in the fields of the device structure with ethernet-generic values.
+	   This should be in a common file instead of per-driver. */
+	for (i = 0; i < DEV_NUMBUFFS; i++)
+		skb_queue_head_init(&dev->buffs[i]);
+
+	dev->hard_header	= tr_header;
+	dev->rebuild_header 	= tr_rebuild_header;
+
+	dev->type		= ARPHRD_IEEE802;
+	dev->hard_header_len 	= TR_HLEN;
+	dev->mtu		= 2000; /* bug in fragmenter...*/
+	dev->addr_len		= TR_ALEN;
+	dev->tx_queue_len	= 100;	/* Long queues on tr */
+
+	memset(dev->broadcast,0xFF, TR_ALEN);
+
+	/* New-style flags. */
+	dev->flags		= IFF_BROADCAST;
+	dev->family		= AF_INET;
+	dev->pa_addr	= 0;
+	dev->pa_brdaddr	= 0;
+	dev->pa_mask	= 0;
+	dev->pa_alen	= 4;
+}
+
+#endif
+
+#ifdef CONFIG_FDDI
+
+/* Fill DEV with FDDI-generic defaults (802.2 SNAP header length plus
+   pad, SNAP payload MTU, flags).  */
+void fddi_setup(struct device *dev)
+	{
+	int i;
+
+	/*
+	 * Fill in the fields of the device structure with FDDI-generic values.
+	 * This should be in a common file instead of per-driver.
+	 */
+	for (i=0; i < DEV_NUMBUFFS; i++)
+		skb_queue_head_init(&dev->buffs[i]);
+
+	dev->change_mtu			= fddi_change_mtu;
+	dev->hard_header		= fddi_header;
+	dev->rebuild_header		= fddi_rebuild_header;
+
+	dev->type				= ARPHRD_FDDI;
+	dev->hard_header_len	= FDDI_K_SNAP_HLEN+3;	/* Assume 802.2 SNAP hdr len + 3 pad bytes */
+	dev->mtu				= FDDI_K_SNAP_DLEN;		/* Assume max payload of 802.2 SNAP frame */
+	dev->addr_len			= FDDI_K_ALEN;
+	dev->tx_queue_len		= 100;	/* Long queues on FDDI */
+
+	memset(dev->broadcast, 0xFF, FDDI_K_ALEN);
+
+	/* New-style flags */
+	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
+	dev->family		= AF_INET;
+	dev->pa_addr	= 0;
+	dev->pa_brdaddr	= 0;
+	dev->pa_mask	= 0;
+	dev->pa_alen	= 4;
+	return;
+	}
+
+#endif
+
+/* Apply the I/O mapping MAP to DEV.  A field of all-ones (-1 in the
+   field's width) means "leave this setting unchanged".  Returns 0.  */
+int ether_config(struct device *dev, struct ifmap *map)
+{
+	if (map->mem_start != (u_long)(-1))
+		dev->mem_start = map->mem_start;
+	if (map->mem_end != (u_long)(-1))
+		dev->mem_end = map->mem_end;
+	if (map->base_addr != (u_short)(-1))
+		dev->base_addr = map->base_addr;
+	if (map->irq != (u_char)(-1))
+		dev->irq = map->irq;
+	if (map->dma != (u_char)(-1))
+		dev->dma = map->dma;
+	if (map->port != (u_char)(-1))
+		dev->if_port = map->port;
+	return 0;
+}
+
+//int register_netdev(struct device *dev)
+//{
+// struct device *d = dev_base;
+// unsigned long flags;
+// int i=MAX_ETH_CARDS;
+//
+// save_flags(flags);
+// cli();
+//
+// if (dev && dev->init) {
+// if (dev->name &&
+// ((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
+// for (i = 0; i < MAX_ETH_CARDS; ++i)
+// if (ethdev_index[i] == NULL) {
+// sprintf(dev->name, "eth%d", i);
+// printk("loading device '%s'...\n", dev->name);
+// ethdev_index[i] = dev;
+// break;
+// }
+// }
+//
+// sti(); /* device probes assume interrupts enabled */
+// if (dev->init(dev) != 0) {
+// if (i < MAX_ETH_CARDS) ethdev_index[i] = NULL;
+// restore_flags(flags);
+// return -EIO;
+// }
+// cli();
+//
+// /* Add device to end of chain */
+// if (dev_base) {
+// while (d->next)
+// d = d->next;
+// d->next = dev;
+// }
+// else
+// dev_base = dev;
+// dev->next = NULL;
+// }
+// restore_flags(flags);
+// return 0;
+//}
+//
+//void unregister_netdev(struct device *dev)
+//{
+// struct device *d = dev_base;
+// unsigned long flags;
+// int i;
+//
+// save_flags(flags);
+// cli();
+//
+// if (dev == NULL)
+// {
+// printk("was NULL\n");
+// restore_flags(flags);
+// return;
+// }
+// /* else */
+// if (dev->start)
+// printk("ERROR '%s' busy and not MOD_IN_USE.\n", dev->name);
+//
+// /*
+// * must jump over main_device+aliases
+// * avoid alias devices unregistration so that only
+// * net_alias module manages them
+// */
+//#ifdef CONFIG_NET_ALIAS
+// if (dev_base == dev)
+// dev_base = net_alias_nextdev(dev);
+// else
+// {
+// while(d && (net_alias_nextdev(d) != dev)) /* skip aliases */
+// d = net_alias_nextdev(d);
+//
+// if (d && (net_alias_nextdev(d) == dev))
+// {
+// /*
+// * Critical: Bypass by consider devices as blocks (maindev+aliases)
+// */
+// net_alias_nextdev_set(d, net_alias_nextdev(dev));
+// }
+//#else
+// if (dev_base == dev)
+// dev_base = dev->next;
+// else
+// {
+// while (d && (d->next != dev))
+// d = d->next;
+//
+// if (d && (d->next == dev))
+// {
+// d->next = dev->next;
+// }
+//#endif
+// else
+// {
+// printk("unregister_netdev: '%s' not found\n", dev->name);
+// restore_flags(flags);
+// return;
+// }
+// }
+// for (i = 0; i < MAX_ETH_CARDS; ++i)
+// {
+// if (ethdev_index[i] == dev)
+// {
+// ethdev_index[i] = NULL;
+// break;
+// }
+// }
+//
+// restore_flags(flags);
+//
+// /*
+// * You can i.e use a interfaces in a route though it is not up.
+// * We call close_dev (which is changed: it will down a device even if
+// * dev->flags==0 (but it will not call dev->stop if IFF_UP
+// * is not set).
+// * This will call notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev),
+// * dev_mc_discard(dev), ....
+// */
+//
+// dev_close(dev);
+//}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c net_init.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/pcnet32/netdevice.h b/pcnet32/netdevice.h
new file mode 100644
index 000000000..8c84f0811
--- /dev/null
+++ b/pcnet32/netdevice.h
@@ -0,0 +1,335 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Interfaces handler.
+ *
+ * Version: @(#)dev.h 1.0.11 07/31/96
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Donald J. Becker, <becker@super.org>
+ * Alan Cox, <A.Cox@swansea.ac.uk>
+ * Bjorn Ekwall. <bj0rn@blox.se>
+ * Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Moved to /usr/include/linux for NET3
+ * Added extern for fddi_setup()
+ */
+#ifndef _LINUX_NETDEVICE_H
+#define _LINUX_NETDEVICE_H
+
+#include <hurd.h>
+#include <hurd/ports.h>
+
+#include "skbuff.h"
+#include "if.h"
+
+/* for future expansion when we will have different priorities. */
+#define DEV_NUMBUFFS 3
+#define MAX_ADDR_LEN 7
+#ifndef CONFIG_AX25
+#ifndef CONFIG_TR
+#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE)
+#define MAX_HEADER 32 /* We really need about 18 worst case .. so 32 is aligned */
+#else
+#define MAX_HEADER 80 /* We need to allow for having tunnel headers */
+#endif /* IPIP */
+#else
+#define MAX_HEADER 48 /* Token Ring header needs 40 bytes ... 48 is aligned */
+#endif /* TR */
+#else
+#define MAX_HEADER 96 /* AX.25 + NetROM */
+#endif /* AX25 */
+
+#define IS_MYADDR 1 /* address is (one of) our own */
+#define IS_LOOPBACK 2 /* address is for LOOPBACK */
+#define IS_BROADCAST 3 /* address is a valid broadcast */
+#define IS_INVBCAST 4 /* Wrong netmask bcast not for us (unused)*/
+#define IS_MULTICAST 5 /* Multicast IP address */
+
+/*
+ * We tag multicasts with these structures.
+ */
+
+/* One hardware multicast address installed on a device, with a use
+   count so the same address can be added by several users.  */
+struct dev_mc_list
+{
+	struct dev_mc_list *next;		/* singly linked chain */
+	char dmi_addr[MAX_ADDR_LEN];		/* hardware address bytes */
+	unsigned short dmi_addrlen;		/* significant bytes in dmi_addr */
+	unsigned short dmi_users;		/* reference count */
+};
+
+/* Cached, pre-built link-layer header shared between routes, so the
+   hardware header need not be rebuilt for every packet.  */
+struct hh_cache
+{
+	struct hh_cache *hh_next;
+	void *hh_arp;			/* Opaque pointer, used by
+					 * any address resolution module,
+					 * not only ARP.
+					 */
+	int hh_refcnt;			/* number of users */
+	unsigned short hh_type;		/* protocol identifier, f.e ETH_P_IP */
+	char hh_uptodate;		/* hh_data is valid */
+	char hh_data[16];		/* cached hardware header */
+};
+
+/*
+ * The DEVICE structure.
+ * Actually, this whole structure is a big mistake. It mixes I/O
+ * data with strictly "high-level" data, and it has to know about
+ * almost every data structure used in the INET module.
+ */
+
+#ifdef MACH
+
+/* Under MACH the structure is named linux_device (via the #define
+   below when MACH_INCLUDE is not set) to avoid clashing with Mach's
+   own device structures.  */
+#ifndef MACH_INCLUDE
+#define device linux_device
+#endif
+
+struct linux_device
+
+#else
+
+struct device
+
+#endif
+{
+  /* libports port object.  Kept first so the allocation made by
+     ports_create_port in init_etherdev can be used as the device.  */
+  struct port_info port;
+
+  /*
+   * This is the first field of the "visible" part of this structure
+   * (i.e. as seen by users in the "Space.c" file). It is the name
+   * the interface.
+   */
+  char *name;
+
+  /* I/O specific fields - FIXME: Merge these and struct ifmap into one */
+  unsigned long rmem_end;	/* shmem "recv" end */
+  unsigned long rmem_start;	/* shmem "recv" start */
+  unsigned long mem_end;	/* shared mem end */
+  unsigned long mem_start;	/* shared mem start */
+  unsigned long base_addr;	/* device I/O address */
+  unsigned char irq;		/* device IRQ number */
+  unsigned long dev_id;		/* device ID */
+
+  /* Low-level status flags. */
+  volatile unsigned char start,	/* start an operation */
+    interrupt;			/* interrupt arrived */
+  unsigned long tbusy;		/* transmitter busy must be long for bitops */
+
+  struct linux_device *next;
+
+  /* The device initialization function. Called only once. */
+  int (*init)(struct linux_device *dev);
+
+  /* Some hardware also needs these fields, but they are not part of the
+     usual set specified in Space.c. */
+  unsigned char if_port;	/* Selectable AUI, TP,..*/
+  unsigned char dma;		/* DMA channel	*/
+
+  struct enet_statistics* (*get_stats)(struct linux_device *dev);
+
+  /*
+   * This marks the end of the "visible" part of the structure. All
+   * fields hereafter are internal to the system, and may change at
+   * will (read: may be cleaned up at will).
+   */
+
+  /* These may be needed for future network-power-down code. */
+  unsigned long trans_start;	/* Time (in jiffies) of last Tx	*/
+  unsigned long last_rx;	/* Time of last Rx */
+
+  unsigned short flags;		/* interface flags (a la BSD)	*/
+  unsigned short family;	/* address family ID (AF_INET)	*/
+  unsigned short metric;	/* routing metric (not used)	*/
+  unsigned short mtu;		/* interface MTU value		*/
+  unsigned short type;		/* interface hardware type	*/
+  unsigned short hard_header_len;	/* hardware hdr length	*/
+  void *priv;			/* pointer to private data	*/
+
+  /* Interface address info. */
+  unsigned char broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
+  unsigned char pad;				/* make dev_addr aligned to 8 bytes */
+  unsigned char dev_addr[MAX_ADDR_LEN];	/* hw address	*/
+  unsigned char addr_len;	/* hardware address length	*/
+  unsigned long pa_addr;	/* protocol address		*/
+  unsigned long pa_brdaddr;	/* protocol broadcast addr	*/
+  unsigned long pa_dstaddr;	/* protocol P-P other side addr	*/
+  unsigned long pa_mask;	/* protocol netmask		*/
+  unsigned short pa_alen;	/* protocol address length	*/
+
+  struct dev_mc_list *mc_list;	/* Multicast mac addresses	*/
+  int mc_count;			/* Number of installed mcasts	*/
+
+  struct ip_mc_list *ip_mc_list;	/* IP multicast filter chain */
+  __u32 tx_queue_len;		/* Max frames per queue allowed */
+
+  /* For load balancing driver pair support */
+
+  unsigned long pkt_queue;	/* Packets queued */
+  struct linux_device *slave;	/* Slave device */
+  struct net_alias_info *alias_info;	/* main dev alias info */
+  struct net_alias *my_alias;	/* alias devs */
+
+  /* Pointer to the interface buffers. */
+  struct sk_buff_head buffs[DEV_NUMBUFFS];
+
+  /* Pointers to interface service routines. */
+  int (*open)(struct linux_device *dev);
+  int (*stop)(struct linux_device *dev);
+  int (*hard_start_xmit) (struct sk_buff *skb,
+			  struct linux_device *dev);
+  int (*hard_header) (struct sk_buff *skb,
+		      struct linux_device *dev,
+		      unsigned short type,
+		      void *daddr,
+		      void *saddr,
+		      unsigned len);
+  int (*rebuild_header)(void *eth, struct linux_device *dev,
+			unsigned long raddr, struct sk_buff *skb);
+#define HAVE_MULTICAST
+  void (*set_multicast_list)(struct linux_device *dev);
+#define HAVE_SET_MAC_ADDR
+  int (*set_mac_address)(struct linux_device *dev, void *addr);
+#define HAVE_PRIVATE_IOCTL
+  int (*do_ioctl)(struct linux_device *dev, struct ifreq *ifr, int cmd);
+#define HAVE_SET_CONFIG
+  int (*set_config)(struct linux_device *dev, struct ifmap *map);
+#define HAVE_HEADER_CACHE
+  void (*header_cache_bind)(struct hh_cache **hhp, struct linux_device *dev, unsigned short htype, __u32 daddr);
+  void (*header_cache_update)(struct hh_cache *hh, struct linux_device *dev, unsigned char *  haddr);
+#define HAVE_CHANGE_MTU
+  int (*change_mtu)(struct linux_device *dev, int new_mtu);
+
+  struct iw_statistics* (*get_wireless_stats)(struct linux_device *dev);
+
+#ifdef MACH
+
+  /* Back-pointer to the Mach device-emulation state created for this
+     device by device_open (ds_routines.c).  */
+#ifdef MACH_INCLUDE
+  struct net_data *net_data;
+#else
+  void *net_data;
+#endif
+
+#endif
+};
+
+
+/* One registered link-level protocol handler (see dev_add_pack /
+   dev_remove_pack below).  */
+struct packet_type {
+  unsigned short	type;	/* This is really htons(ether_type). */
+  struct linux_device *	dev;	/* device this handler is bound to */
+  int			(*func) (struct sk_buff *, struct linux_device *,
+				 struct packet_type *);	/* receive hook */
+  void			*data;	/* private data passed back to func */
+  struct packet_type	*next;	/* chain of registered handlers */
+};
+
+
+/* Used by dev_rint */
+#define IN_SKBUFF 1
+
+extern volatile unsigned long in_bh;
+
+extern struct linux_device loopback_dev;
+extern struct linux_device *dev_base;
+extern struct packet_type *ptype_base[16];
+
+
+extern int ip_addr_match(unsigned long addr1, unsigned long addr2);
+extern int ip_chk_addr(unsigned long addr);
+extern struct linux_device *ip_dev_bynet(unsigned long daddr, unsigned long mask);
+extern unsigned long ip_my_addr(void);
+extern unsigned long ip_get_mask(unsigned long addr);
+extern struct linux_device *ip_dev_find(unsigned long addr);
+extern struct linux_device *dev_getbytype(unsigned short type);
+
+extern void dev_add_pack(struct packet_type *pt);
+extern void dev_remove_pack(struct packet_type *pt);
+extern struct linux_device *dev_get(const char *name);
+extern int dev_open(struct linux_device *dev);
+extern int dev_close(struct linux_device *dev);
+extern void dev_queue_xmit(struct sk_buff *skb, struct linux_device *dev,
+ int pri);
+
+#define HAVE_NETIF_RX 1
+extern void netif_rx(struct sk_buff *skb);
+extern void net_bh(void);
+
+#ifdef MACH
+#define dev_tint(dev)
+#else
+extern void dev_tint(struct linux_device *dev);
+#endif
+
+extern int dev_change_flags(struct linux_device *dev, short flags);
+extern int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
+extern int dev_ioctl(unsigned int cmd, void *);
+
+extern void dev_init(void);
+
+/* Locking protection for page faults during outputs to devices unloaded during the fault */
+
+extern int dev_lockct;
+
+/*
+ * These two don't currently need to be interrupt-safe
+ * but they may do soon. Do it properly anyway.
+ */
+
+//extern __inline__ void dev_lock_list(void)
+//{
+// unsigned long flags;
+// save_flags(flags);
+// cli();
+// dev_lockct++;
+// restore_flags(flags);
+//}
+//
+//extern __inline__ void dev_unlock_list(void)
+//{
+// unsigned long flags;
+// save_flags(flags);
+// cli();
+// dev_lockct--;
+// restore_flags(flags);
+//}
+//
+///*
+// * This almost never occurs, isn't in performance critical paths
+// * and we can thus be relaxed about it
+// */
+//
+//extern __inline__ void dev_lock_wait(void)
+//{
+// while(dev_lockct)
+// schedule();
+//}
+
+
+/* These functions live elsewhere (drivers/net/net_init.c, but related) */
+
+extern void ether_setup(struct linux_device *dev);
+extern void tr_setup(struct linux_device *dev);
+extern void fddi_setup(struct linux_device *dev);
+extern int ether_config(struct linux_device *dev, struct ifmap *map);
+/* Support for loadable net-drivers */
+extern int register_netdev(struct linux_device *dev);
+extern void unregister_netdev(struct linux_device *dev);
+//extern int register_netdevice_notifier(struct notifier_block *nb);
+//extern int unregister_netdevice_notifier(struct notifier_block *nb);
+/* Functions used for multicast support */
+extern void dev_mc_upload(struct linux_device *dev);
+extern void dev_mc_delete(struct linux_device *dev, void *addr, int alen, int all);
+extern void dev_mc_add(struct linux_device *dev, void *addr, int alen, int newonly);
+extern void dev_mc_discard(struct linux_device *dev);
+/* This is the wrong place but it'll do for the moment */
+extern void ip_mc_allhost(struct linux_device *dev);
+
+#endif /* _LINUX_DEV_H */
diff --git a/pcnet32/notify.defs b/pcnet32/notify.defs
new file mode 100644
index 000000000..2014be5ca
--- /dev/null
+++ b/pcnet32/notify.defs
@@ -0,0 +1 @@
+#include <mach/notify.defs>
diff --git a/pcnet32/pci.h b/pcnet32/pci.h
new file mode 100644
index 000000000..3508979fd
--- /dev/null
+++ b/pcnet32/pci.h
@@ -0,0 +1,1114 @@
+/*
+ * PCI defines and function prototypes
+ * Copyright 1994, Drew Eckhardt
+ *
+ * For more information, please consult
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * M/S HF3-15A
+ * 5200 N.E. Elam Young Parkway
+ * Hillsboro, Oregon 97124-6497
+ * +1 (503) 696-2000
+ * +1 (800) 433-5177
+ *
+ * Manuals are $25 each or $50 for all three, plus $7 shipping
+ * within the United States, $35 abroad.
+ */
+
+
+
+/* PROCEDURE TO REPORT NEW PCI DEVICES
+ * We are trying to collect information on new PCI devices, using
+ * the standard PCI identification procedure. If some warning is
+ * displayed at boot time, please report
+ * - /proc/pci
+ * - your exact hardware description. Try to find out
+ * which device is unknown. It may be your mainboard chipset,
+ * PCI-CPU bridge or PCI-ISA bridge.
+ * - If you can't find the actual information in your hardware
+ * booklet, try to read the references of the chip on the board.
+ * - Send all that to linux-pcisupport@cck.uni-kl.de
+ * and I'll add your device to the list as soon as possible
+ *
+ * BEFORE you send a mail, please check the latest linux releases
+ * to be sure it has not been recently added.
+ *
+ * Thanks
+ * Jens Maurer
+ */
+
+
+
+#ifndef LINUX_PCI_H
+#define LINUX_PCI_H
+
+/*
+ * Under PCI, each device has 256 bytes of configuration address space,
+ * of which the first 64 bytes are standardized as follows:
+ */
+#define PCI_VENDOR_ID 0x00 /* 16 bits */
+#define PCI_DEVICE_ID 0x02 /* 16 bits */
+#define PCI_COMMAND 0x04 /* 16 bits */
+#define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */
+#define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */
+#define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */
+#define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */
+#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */
+#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */
+#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */
+#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
+#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
+#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
+
+#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_66MHZ 0x20 /* Support 66 Mhz PCI 2.1 bus */
+#define PCI_STATUS_UDF 0x40 /* Support User Definable Features */
+
+#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */
+#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
+#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
+#define PCI_STATUS_DEVSEL_FAST 0x000
+#define PCI_STATUS_DEVSEL_MEDIUM 0x200
+#define PCI_STATUS_DEVSEL_SLOW 0x400
+#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
+#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */
+#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */
+#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */
+#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */
+
+#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8
+ revision */
+#define PCI_REVISION_ID 0x08 /* Revision ID */
+#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */
+#define PCI_CLASS_DEVICE 0x0a /* Device class */
+
+#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
+#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
+#define PCI_HEADER_TYPE 0x0e /* 8 bits */
+#define PCI_BIST 0x0f /* 8 bits */
+#define PCI_BIST_CODE_MASK 0x0f /* Return result */
+#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */
+#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */
+
+/*
+ * Base addresses specify locations in memory or I/O space.
+ * Decoded size can be determined by writing a value of
+ * 0xffffffff to the register, and reading it back. Only
+ * 1 bits are decoded.
+ */
+#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
+#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits */
+#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits */
+#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */
+#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */
+#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */
+#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */
+#define PCI_BASE_ADDRESS_SPACE_IO 0x01
+#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00
+#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06
+#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */
+#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M */
+#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */
+#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */
+#define PCI_BASE_ADDRESS_MEM_MASK (~0x0f)
+#define PCI_BASE_ADDRESS_IO_MASK (~0x03)
+/* bit 1 is reserved if address_space = 1 */
+
+#define PCI_CARDBUS_CIS 0x28
+#define PCI_SUBSYSTEM_VENDOR_ID 0x2c
+#define PCI_SUBSYSTEM_ID 0x2e
+#define PCI_ROM_ADDRESS 0x30 /* 32 bits */
+#define PCI_ROM_ADDRESS_ENABLE 0x01 /* Write 1 to enable ROM,
+ bits 31..11 are address,
+ 10..2 are reserved */
+/* 0x34-0x3b are reserved */
+#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */
+#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */
+#define PCI_MIN_GNT 0x3e /* 8 bits */
+#define PCI_MAX_LAT 0x3f /* 8 bits */
+
+#define PCI_CLASS_NOT_DEFINED 0x0000
+#define PCI_CLASS_NOT_DEFINED_VGA 0x0001
+
+#define PCI_BASE_CLASS_STORAGE 0x01
+#define PCI_CLASS_STORAGE_SCSI 0x0100
+#define PCI_CLASS_STORAGE_IDE 0x0101
+#define PCI_CLASS_STORAGE_FLOPPY 0x0102
+#define PCI_CLASS_STORAGE_IPI 0x0103
+#define PCI_CLASS_STORAGE_RAID 0x0104
+#define PCI_CLASS_STORAGE_OTHER 0x0180
+
+#define PCI_BASE_CLASS_NETWORK 0x02
+#define PCI_CLASS_NETWORK_ETHERNET 0x0200
+#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
+#define PCI_CLASS_NETWORK_FDDI 0x0202
+#define PCI_CLASS_NETWORK_ATM 0x0203
+#define PCI_CLASS_NETWORK_OTHER 0x0280
+
+#define PCI_BASE_CLASS_DISPLAY 0x03
+#define PCI_CLASS_DISPLAY_VGA 0x0300
+#define PCI_CLASS_DISPLAY_XGA 0x0301
+#define PCI_CLASS_DISPLAY_OTHER 0x0380
+
+#define PCI_BASE_CLASS_MULTIMEDIA 0x04
+#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
+#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
+#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
+
+#define PCI_BASE_CLASS_MEMORY 0x05
+#define PCI_CLASS_MEMORY_RAM 0x0500
+#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_OTHER 0x0580
+
+#define PCI_BASE_CLASS_BRIDGE 0x06
+#define PCI_CLASS_BRIDGE_HOST 0x0600
+#define PCI_CLASS_BRIDGE_ISA 0x0601
+#define PCI_CLASS_BRIDGE_EISA 0x0602
+#define PCI_CLASS_BRIDGE_MC 0x0603
+#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
+#define PCI_CLASS_BRIDGE_NUBUS 0x0606
+#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
+#define PCI_CLASS_BRIDGE_OTHER 0x0680
+
+
+#define PCI_BASE_CLASS_COMMUNICATION 0x07
+#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
+#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
+#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+
+#define PCI_BASE_CLASS_SYSTEM 0x08
+#define PCI_CLASS_SYSTEM_PIC 0x0800
+#define PCI_CLASS_SYSTEM_DMA 0x0801
+#define PCI_CLASS_SYSTEM_TIMER 0x0802
+#define PCI_CLASS_SYSTEM_RTC 0x0803
+#define PCI_CLASS_SYSTEM_OTHER 0x0880
+
+#define PCI_BASE_CLASS_INPUT 0x09
+#define PCI_CLASS_INPUT_KEYBOARD 0x0900
+#define PCI_CLASS_INPUT_PEN 0x0901
+#define PCI_CLASS_INPUT_MOUSE 0x0902
+#define PCI_CLASS_INPUT_OTHER 0x0980
+
+#define PCI_BASE_CLASS_DOCKING 0x0a
+#define PCI_CLASS_DOCKING_GENERIC 0x0a00
+#define PCI_CLASS_DOCKING_OTHER 0x0a01
+
+#define PCI_BASE_CLASS_PROCESSOR 0x0b
+#define PCI_CLASS_PROCESSOR_386 0x0b00
+#define PCI_CLASS_PROCESSOR_486 0x0b01
+#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02
+#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10
+#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
+#define PCI_CLASS_PROCESSOR_CO 0x0b40
+
+#define PCI_BASE_CLASS_SERIAL 0x0c
+#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00
+#define PCI_CLASS_SERIAL_ACCESS 0x0c01
+#define PCI_CLASS_SERIAL_SSA 0x0c02
+#define PCI_CLASS_SERIAL_USB 0x0c03
+#define PCI_CLASS_SERIAL_FIBER 0x0c04
+
+#define PCI_CLASS_OTHERS 0xff
+
+/*
+ * Vendor and card IDs: sort these numerically according to vendor
+ * (and according to card ID within vendor). Send all updates to
+ * <linux-pcisupport@cck.uni-kl.de>.
+ */
+#define PCI_VENDOR_ID_COMPAQ 0x0e11
+#define PCI_DEVICE_ID_COMPAQ_1280 0x3033
+#define PCI_DEVICE_ID_COMPAQ_TRIFLEX 0x4000
+#define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10
+#define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32
+#define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35
+#define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40
+#define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43
+#define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011
+#define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150
+
+#define PCI_VENDOR_ID_NCR 0x1000
+#define PCI_DEVICE_ID_NCR_53C810 0x0001
+#define PCI_DEVICE_ID_NCR_53C820 0x0002
+#define PCI_DEVICE_ID_NCR_53C825 0x0003
+#define PCI_DEVICE_ID_NCR_53C815 0x0004
+#define PCI_DEVICE_ID_NCR_53C860 0x0006
+#define PCI_DEVICE_ID_NCR_53C896 0x000b
+#define PCI_DEVICE_ID_NCR_53C895 0x000c
+#define PCI_DEVICE_ID_NCR_53C885 0x000d
+#define PCI_DEVICE_ID_NCR_53C875 0x000f
+#define PCI_DEVICE_ID_NCR_53C875J 0x008f
+
+#define PCI_VENDOR_ID_ATI 0x1002
+#define PCI_DEVICE_ID_ATI_68800 0x4158
+#define PCI_DEVICE_ID_ATI_215CT222 0x4354
+#define PCI_DEVICE_ID_ATI_210888CX 0x4358
+#define PCI_DEVICE_ID_ATI_215GB 0x4742
+#define PCI_DEVICE_ID_ATI_215GD 0x4744
+#define PCI_DEVICE_ID_ATI_215GI 0x4749
+#define PCI_DEVICE_ID_ATI_215GP 0x4750
+#define PCI_DEVICE_ID_ATI_215GQ 0x4751
+#define PCI_DEVICE_ID_ATI_215GT 0x4754
+#define PCI_DEVICE_ID_ATI_215GTB 0x4755
+#define PCI_DEVICE_ID_ATI_210888GX 0x4758
+#define PCI_DEVICE_ID_ATI_215LG 0x4c47
+#define PCI_DEVICE_ID_ATI_264LT 0x4c54
+#define PCI_DEVICE_ID_ATI_264VT 0x5654
+
+#define PCI_VENDOR_ID_VLSI 0x1004
+#define PCI_DEVICE_ID_VLSI_82C592 0x0005
+#define PCI_DEVICE_ID_VLSI_82C593 0x0006
+#define PCI_DEVICE_ID_VLSI_82C594 0x0007
+#define PCI_DEVICE_ID_VLSI_82C597 0x0009
+#define PCI_DEVICE_ID_VLSI_82C541 0x000c
+#define PCI_DEVICE_ID_VLSI_82C543 0x000d
+#define PCI_DEVICE_ID_VLSI_82C532 0x0101
+#define PCI_DEVICE_ID_VLSI_82C534 0x0102
+#define PCI_DEVICE_ID_VLSI_82C535 0x0104
+#define PCI_DEVICE_ID_VLSI_82C147 0x0105
+#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
+
+#define PCI_VENDOR_ID_ADL 0x1005
+#define PCI_DEVICE_ID_ADL_2301 0x2301
+
+#define PCI_VENDOR_ID_NS 0x100b
+#define PCI_DEVICE_ID_NS_87415 0x0002
+#define PCI_DEVICE_ID_NS_87410 0xd001
+
+#define PCI_VENDOR_ID_TSENG 0x100c
+#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
+#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205
+#define PCI_DEVICE_ID_TSENG_W32P_c 0x3206
+#define PCI_DEVICE_ID_TSENG_W32P_d 0x3207
+#define PCI_DEVICE_ID_TSENG_ET6000 0x3208
+
+#define PCI_VENDOR_ID_WEITEK 0x100e
+#define PCI_DEVICE_ID_WEITEK_P9000 0x9001
+#define PCI_DEVICE_ID_WEITEK_P9100 0x9100
+
+#define PCI_VENDOR_ID_DEC 0x1011
+#define PCI_DEVICE_ID_DEC_BRD 0x0001
+#define PCI_DEVICE_ID_DEC_TULIP 0x0002
+#define PCI_DEVICE_ID_DEC_TGA 0x0004
+#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009
+#define PCI_DEVICE_ID_DEC_TGA2 0x000D
+#define PCI_DEVICE_ID_DEC_FDDI 0x000F
+#define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014
+#define PCI_DEVICE_ID_DEC_21142 0x0019
+#define PCI_DEVICE_ID_DEC_21052 0x0021
+#define PCI_DEVICE_ID_DEC_21150 0x0022
+#define PCI_DEVICE_ID_DEC_21152 0x0024
+#define PCI_DEVICE_ID_DEC_21154 0x0026
+#define PCI_DEVICE_ID_DEC_21285 0x1065
+
+#define PCI_VENDOR_ID_CIRRUS 0x1013
+#define PCI_DEVICE_ID_CIRRUS_7548 0x0038
+#define PCI_DEVICE_ID_CIRRUS_5430 0x00a0
+#define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4
+#define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8
+#define PCI_DEVICE_ID_CIRRUS_5436 0x00ac
+#define PCI_DEVICE_ID_CIRRUS_5446 0x00b8
+#define PCI_DEVICE_ID_CIRRUS_5480 0x00bc
+#define PCI_DEVICE_ID_CIRRUS_5464 0x00d4
+#define PCI_DEVICE_ID_CIRRUS_5465 0x00d6
+#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
+#define PCI_DEVICE_ID_CIRRUS_6832 0x1110
+#define PCI_DEVICE_ID_CIRRUS_7542 0x1200
+#define PCI_DEVICE_ID_CIRRUS_7543 0x1202
+#define PCI_DEVICE_ID_CIRRUS_7541 0x1204
+
+#define PCI_VENDOR_ID_IBM 0x1014
+#define PCI_DEVICE_ID_IBM_FIRE_CORAL 0x000a
+#define PCI_DEVICE_ID_IBM_TR 0x0018
+#define PCI_DEVICE_ID_IBM_82G2675 0x001d
+#define PCI_DEVICE_ID_IBM_MCA 0x0020
+#define PCI_DEVICE_ID_IBM_82351 0x0022
+#define PCI_DEVICE_ID_IBM_SERVERAID 0x002e
+#define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e
+#define PCI_DEVICE_ID_IBM_3780IDSP 0x007d
+
+#define PCI_VENDOR_ID_WD 0x101c
+#define PCI_DEVICE_ID_WD_7197 0x3296
+
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD_LANCE 0x2000
+#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+
+#define PCI_VENDOR_ID_TRIDENT 0x1023
+#define PCI_DEVICE_ID_TRIDENT_9397 0x9397
+#define PCI_DEVICE_ID_TRIDENT_9420 0x9420
+#define PCI_DEVICE_ID_TRIDENT_9440 0x9440
+#define PCI_DEVICE_ID_TRIDENT_9660 0x9660
+#define PCI_DEVICE_ID_TRIDENT_9750 0x9750
+
+#define PCI_VENDOR_ID_AI 0x1025
+#define PCI_DEVICE_ID_AI_M1435 0x1435
+
+#define PCI_VENDOR_ID_MATROX 0x102B
+#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
+#define PCI_DEVICE_ID_MATROX_MIL 0x0519
+#define PCI_DEVICE_ID_MATROX_MYS 0x051A
+#define PCI_DEVICE_ID_MATROX_MIL_2 0x051b
+#define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f
+#define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520
+#define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521
+#define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10
+#define PCI_DEVICE_ID_MATROX_G100_MM 0x1000
+#define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001
+
+#define PCI_VENDOR_ID_CT 0x102c
+#define PCI_DEVICE_ID_CT_65545 0x00d8
+#define PCI_DEVICE_ID_CT_65548 0x00dc
+#define PCI_DEVICE_ID_CT_65550 0x00e0
+#define PCI_DEVICE_ID_CT_65554 0x00e4
+#define PCI_DEVICE_ID_CT_65555 0x00e5
+
+#define PCI_VENDOR_ID_MIRO 0x1031
+#define PCI_DEVICE_ID_MIRO_36050 0x5601
+
+#define PCI_VENDOR_ID_NEC 0x1033
+#define PCI_DEVICE_ID_NEC_PCX2 0x0046
+
+#define PCI_VENDOR_ID_FD 0x1036
+#define PCI_DEVICE_ID_FD_36C70 0x0000
+
+#define PCI_VENDOR_ID_SI 0x1039
+#define PCI_DEVICE_ID_SI_5591_AGP 0x0001
+#define PCI_DEVICE_ID_SI_6202 0x0002
+#define PCI_DEVICE_ID_SI_503 0x0008
+#define PCI_DEVICE_ID_SI_ACPI 0x0009
+#define PCI_DEVICE_ID_SI_5597_VGA 0x0200
+#define PCI_DEVICE_ID_SI_6205 0x0205
+#define PCI_DEVICE_ID_SI_501 0x0406
+#define PCI_DEVICE_ID_SI_496 0x0496
+#define PCI_DEVICE_ID_SI_601 0x0601
+#define PCI_DEVICE_ID_SI_5107 0x5107
+#define PCI_DEVICE_ID_SI_5511 0x5511
+#define PCI_DEVICE_ID_SI_5513 0x5513
+#define PCI_DEVICE_ID_SI_5571 0x5571
+#define PCI_DEVICE_ID_SI_5591 0x5591
+#define PCI_DEVICE_ID_SI_5597 0x5597
+#define PCI_DEVICE_ID_SI_7001 0x7001
+
+#define PCI_VENDOR_ID_HP 0x103c
+#define PCI_DEVICE_ID_HP_J2585A 0x1030
+#define PCI_DEVICE_ID_HP_J2585B 0x1031
+
+#define PCI_VENDOR_ID_PCTECH 0x1042
+#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000
+#define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_0 0x3000
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_1 0x3010
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020
+
+#define PCI_VENDOR_ID_DPT 0x1044
+#define PCI_DEVICE_ID_DPT 0xa400
+
+#define PCI_VENDOR_ID_OPTI 0x1045
+#define PCI_DEVICE_ID_OPTI_92C178 0xc178
+#define PCI_DEVICE_ID_OPTI_82C557 0xc557
+#define PCI_DEVICE_ID_OPTI_82C558 0xc558
+#define PCI_DEVICE_ID_OPTI_82C621 0xc621
+#define PCI_DEVICE_ID_OPTI_82C700 0xc700
+#define PCI_DEVICE_ID_OPTI_82C701 0xc701
+#define PCI_DEVICE_ID_OPTI_82C814 0xc814
+#define PCI_DEVICE_ID_OPTI_82C822 0xc822
+#define PCI_DEVICE_ID_OPTI_82C825 0xd568
+
+#define PCI_VENDOR_ID_SGS 0x104a
+#define PCI_DEVICE_ID_SGS_2000 0x0008
+#define PCI_DEVICE_ID_SGS_1764 0x0009
+
+#define PCI_VENDOR_ID_BUSLOGIC 0x104B
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040
+#define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130
+
+#define PCI_VENDOR_ID_TI 0x104c
+#define PCI_DEVICE_ID_TI_TVP4010 0x3d04
+#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
+#define PCI_DEVICE_ID_TI_PCI1130 0xac12
+#define PCI_DEVICE_ID_TI_PCI1131 0xac15
+#define PCI_DEVICE_ID_TI_PCI1250 0xac16
+
+#define PCI_VENDOR_ID_OAK 0x104e
+#define PCI_DEVICE_ID_OAK_OTI107 0x0107
+
+/* Winbond have two vendor IDs! See 0x10ad as well */
+#define PCI_VENDOR_ID_WINBOND2 0x1050
+#define PCI_DEVICE_ID_WINBOND2_89C940 0x0940
+
+#define PCI_VENDOR_ID_MOTOROLA 0x1057
+#define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001
+#define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002
+#define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801
+
+#define PCI_VENDOR_ID_PROMISE 0x105a
+#define PCI_DEVICE_ID_PROMISE_20246 0x4d33
+#define PCI_DEVICE_ID_PROMISE_20262 0x4d38
+#define PCI_DEVICE_ID_PROMISE_5300 0x5300
+
+#define PCI_VENDOR_ID_N9 0x105d
+#define PCI_DEVICE_ID_N9_I128 0x2309
+#define PCI_DEVICE_ID_N9_I128_2 0x2339
+#define PCI_DEVICE_ID_N9_I128_T2R 0x493d
+
+#define PCI_VENDOR_ID_UMC 0x1060
+#define PCI_DEVICE_ID_UMC_UM8673F 0x0101
+#define PCI_DEVICE_ID_UMC_UM8891A 0x0891
+#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a
+#define PCI_DEVICE_ID_UMC_UM8886A 0x886a
+#define PCI_DEVICE_ID_UMC_UM8881F 0x8881
+#define PCI_DEVICE_ID_UMC_UM8886F 0x8886
+#define PCI_DEVICE_ID_UMC_UM9017F 0x9017
+#define PCI_DEVICE_ID_UMC_UM8886N 0xe886
+#define PCI_DEVICE_ID_UMC_UM8891N 0xe891
+
+#define PCI_VENDOR_ID_X 0x1061
+#define PCI_DEVICE_ID_X_AGX016 0x0001
+
+#define PCI_VENDOR_ID_PICOP 0x1066
+#define PCI_DEVICE_ID_PICOP_PT86C52X 0x0001
+#define PCI_DEVICE_ID_PICOP_PT80C524 0x8002
+
+#define PCI_VENDOR_ID_MYLEX 0x1069
+#define PCI_DEVICE_ID_MYLEX_DAC960P_V2 0x0001
+#define PCI_DEVICE_ID_MYLEX_DAC960P_V3 0x0002
+#define PCI_DEVICE_ID_MYLEX_DAC960P_V4 0x0010
+#define PCI_DEVICE_ID_MYLEX_DAC960P_V5 0x0020
+
+#define PCI_VENDOR_ID_APPLE 0x106b
+#define PCI_DEVICE_ID_APPLE_BANDIT 0x0001
+#define PCI_DEVICE_ID_APPLE_GC 0x0002
+#define PCI_DEVICE_ID_APPLE_HYDRA 0x000e
+
+#define PCI_VENDOR_ID_NEXGEN 0x1074
+#define PCI_DEVICE_ID_NEXGEN_82C501 0x4e78
+
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020
+#define PCI_DEVICE_ID_QLOGIC_ISP1022 0x1022
+
+#define PCI_VENDOR_ID_CYRIX 0x1078
+#define PCI_DEVICE_ID_CYRIX_5510 0x0000
+#define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001
+#define PCI_DEVICE_ID_CYRIX_5520 0x0002
+#define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100
+#define PCI_DEVICE_ID_CYRIX_5530_SMI 0x0101
+#define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102
+#define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103
+#define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104
+
+#define PCI_VENDOR_ID_LEADTEK 0x107d
+#define PCI_DEVICE_ID_LEADTEK_805 0x0000
+
+#define PCI_VENDOR_ID_CONTAQ 0x1080
+#define PCI_DEVICE_ID_CONTAQ_82C599 0x0600
+#define PCI_DEVICE_ID_CONTAQ_82C693 0xc693
+
+#define PCI_VENDOR_ID_FOREX 0x1083
+
+#define PCI_VENDOR_ID_OLICOM 0x108d
+#define PCI_DEVICE_ID_OLICOM_OC3136 0x0001
+#define PCI_DEVICE_ID_OLICOM_OC2315 0x0011
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+#define PCI_DEVICE_ID_OLICOM_OC6151 0x0021
+
+#define PCI_VENDOR_ID_SUN 0x108e
+#define PCI_DEVICE_ID_SUN_EBUS 0x1000
+#define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001
+#define PCI_DEVICE_ID_SUN_SIMBA 0x5000
+#define PCI_DEVICE_ID_SUN_PBM 0x8000
+#define PCI_DEVICE_ID_SUN_SABRE 0xa000
+
+#define PCI_VENDOR_ID_CMD 0x1095
+#define PCI_DEVICE_ID_CMD_640 0x0640
+#define PCI_DEVICE_ID_CMD_643 0x0643
+#define PCI_DEVICE_ID_CMD_646 0x0646
+#define PCI_DEVICE_ID_CMD_670 0x0670
+
+#define PCI_VENDOR_ID_VISION 0x1098
+#define PCI_DEVICE_ID_VISION_QD8500 0x0001
+#define PCI_DEVICE_ID_VISION_QD8580 0x0002
+
+#define PCI_VENDOR_ID_BROOKTREE 0x109e
+#define PCI_DEVICE_ID_BROOKTREE_848 0x0350
+#define PCI_DEVICE_ID_BROOKTREE_849A 0x0351
+#define PCI_DEVICE_ID_BROOKTREE_8474 0x8474
+
+#define PCI_VENDOR_ID_SIERRA 0x10a8
+#define PCI_DEVICE_ID_SIERRA_STB 0x0000
+
+#define PCI_VENDOR_ID_ACC 0x10aa
+#define PCI_DEVICE_ID_ACC_2056 0x0000
+
+#define PCI_VENDOR_ID_WINBOND 0x10ad
+#define PCI_DEVICE_ID_WINBOND_83769 0x0001
+#define PCI_DEVICE_ID_WINBOND_82C105 0x0105
+#define PCI_DEVICE_ID_WINBOND_83C553 0x0565
+
+#define PCI_VENDOR_ID_DATABOOK 0x10b3
+#define PCI_DEVICE_ID_DATABOOK_87144 0xb106
+
+#define PCI_VENDOR_ID_PLX 0x10b5
+#define PCI_DEVICE_ID_PLX_9050 0x9050
+#define PCI_DEVICE_ID_PLX_9080 0x9080
+
+#define PCI_DEVICE_ID_PLX_SPCOM200 0x1103
+
+#define PCI_VENDOR_ID_MADGE 0x10b6
+#define PCI_DEVICE_ID_MADGE_MK2 0x0002
+
+#define PCI_VENDOR_ID_3COM 0x10b7
+#define PCI_DEVICE_ID_3COM_3C339 0x3390
+#define PCI_DEVICE_ID_3COM_3C590 0x5900
+#define PCI_DEVICE_ID_3COM_3C595TX 0x5950
+#define PCI_DEVICE_ID_3COM_3C595T4 0x5951
+#define PCI_DEVICE_ID_3COM_3C595MII 0x5952
+#define PCI_DEVICE_ID_3COM_3C900TPO 0x9000
+#define PCI_DEVICE_ID_3COM_3C900COMBO 0x9001
+#define PCI_DEVICE_ID_3COM_3C905TX 0x9050
+#define PCI_DEVICE_ID_3COM_3C905T4 0x9051
+#define PCI_DEVICE_ID_3COM_3C905B_TX 0x9055
+
+#define PCI_VENDOR_ID_SMC 0x10b8
+#define PCI_DEVICE_ID_SMC_EPIC100 0x0005
+
+#define PCI_VENDOR_ID_AL 0x10b9
+#define PCI_DEVICE_ID_AL_M1445 0x1445
+#define PCI_DEVICE_ID_AL_M1449 0x1449
+#define PCI_DEVICE_ID_AL_M1451 0x1451
+#define PCI_DEVICE_ID_AL_M1461 0x1461
+#define PCI_DEVICE_ID_AL_M1489 0x1489
+#define PCI_DEVICE_ID_AL_M1511 0x1511
+#define PCI_DEVICE_ID_AL_M1513 0x1513
+#define PCI_DEVICE_ID_AL_M1521 0x1521
+#define PCI_DEVICE_ID_AL_M1523 0x1523
+#define PCI_DEVICE_ID_AL_M1531 0x1531
+#define PCI_DEVICE_ID_AL_M1533 0x1533
+#define PCI_DEVICE_ID_AL_M1541 0x1541
+#define PCI_DEVICE_ID_AL_M1543 0x1543
+#define PCI_DEVICE_ID_AL_M3307 0x3307
+#define PCI_DEVICE_ID_AL_M4803 0x5215
+#define PCI_DEVICE_ID_AL_M5219 0x5219
+#define PCI_DEVICE_ID_AL_M5229 0x5229
+#define PCI_DEVICE_ID_AL_M5237 0x5237
+#define PCI_DEVICE_ID_AL_M7101 0x7101
+
+#define PCI_VENDOR_ID_MITSUBISHI 0x10ba
+
+#define PCI_VENDOR_ID_SURECOM 0x10bd
+#define PCI_DEVICE_ID_SURECOM_NE34 0x0e34
+
+#define PCI_VENDOR_ID_NEOMAGIC 0x10c8
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2070 0x0001
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128V 0x0002
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128ZV 0x0003
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2160 0x0004
+
+#define PCI_VENDOR_ID_ASP 0x10cd
+#define PCI_DEVICE_ID_ASP_ABP940 0x1200
+#define PCI_DEVICE_ID_ASP_ABP940U 0x1300
+#define PCI_DEVICE_ID_ASP_ABP940UW 0x2300
+
+#define PCI_VENDOR_ID_MACRONIX 0x10d9
+#define PCI_DEVICE_ID_MACRONIX_MX98713 0x0512
+#define PCI_DEVICE_ID_MACRONIX_MX987x5 0x0531
+
+#define PCI_VENDOR_ID_CERN 0x10dc
+#define PCI_DEVICE_ID_CERN_SPSB_PMC 0x0001
+#define PCI_DEVICE_ID_CERN_SPSB_PCI 0x0002
+#define PCI_DEVICE_ID_CERN_HIPPI_DST 0x0021
+#define PCI_DEVICE_ID_CERN_HIPPI_SRC 0x0022
+
+#define PCI_VENDOR_ID_NVIDIA 0x10de
+
+#define PCI_VENDOR_ID_IMS 0x10e0
+#define PCI_DEVICE_ID_IMS_8849 0x8849
+
+#define PCI_VENDOR_ID_TEKRAM2 0x10e1
+#define PCI_DEVICE_ID_TEKRAM2_690c 0x690c
+
+#define PCI_VENDOR_ID_TUNDRA 0x10e3
+#define PCI_DEVICE_ID_TUNDRA_CA91C042 0x0000
+
+#define PCI_VENDOR_ID_AMCC 0x10e8
+#define PCI_DEVICE_ID_AMCC_MYRINET 0x8043
+#define PCI_DEVICE_ID_AMCC_S5933 0x807d
+#define PCI_DEVICE_ID_AMCC_S5933_HEPC3 0x809c
+
+#define PCI_VENDOR_ID_INTERG 0x10ea
+#define PCI_DEVICE_ID_INTERG_1680 0x1680
+#define PCI_DEVICE_ID_INTERG_1682 0x1682
+
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+#define PCI_DEVICE_ID_REALTEK_8029 0x8029
+#define PCI_DEVICE_ID_REALTEK_8129 0x8129
+#define PCI_DEVICE_ID_REALTEK_8139 0x8139
+
+#define PCI_VENDOR_ID_TRUEVISION 0x10fa
+#define PCI_DEVICE_ID_TRUEVISION_T1000 0x000c
+
+#define PCI_VENDOR_ID_INIT 0x1101
+#define PCI_DEVICE_ID_INIT_320P 0x9100
+#define PCI_DEVICE_ID_INIT_360P 0x9500
+
+#define PCI_VENDOR_ID_TTI 0x1103
+#define PCI_DEVICE_ID_TTI_HPT343 0x0003
+
+#define PCI_VENDOR_ID_VIA 0x1106
+#define PCI_DEVICE_ID_VIA_82C505 0x0505
+#define PCI_DEVICE_ID_VIA_82C561 0x0561
+#define PCI_DEVICE_ID_VIA_82C586_1 0x0571
+#define PCI_DEVICE_ID_VIA_82C576 0x0576
+#define PCI_DEVICE_ID_VIA_82C585 0x0585
+#define PCI_DEVICE_ID_VIA_82C586_0 0x0586
+#define PCI_DEVICE_ID_VIA_82C595 0x0595
+#define PCI_DEVICE_ID_VIA_82C597_0 0x0597
+#define PCI_DEVICE_ID_VIA_82C598_0 0x0598
+#define PCI_DEVICE_ID_VIA_82C926 0x0926
+#define PCI_DEVICE_ID_VIA_82C416 0x1571
+#define PCI_DEVICE_ID_VIA_82C595_97 0x1595
+#define PCI_DEVICE_ID_VIA_82C586_2 0x3038
+#define PCI_DEVICE_ID_VIA_82C586_3 0x3040
+#define PCI_DEVICE_ID_VIA_86C100A 0x6100
+#define PCI_DEVICE_ID_VIA_82C597_1 0x8597
+#define PCI_DEVICE_ID_VIA_82C598_1 0x8598
+
+#define PCI_VENDOR_ID_SMC2 0x1113
+#define PCI_DEVICE_ID_SMC2_1211TX 0x1211
+
+#define PCI_VENDOR_ID_VORTEX 0x1119
+#define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000
+#define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001
+#define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002
+#define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003
+#define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004
+#define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005
+#define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006
+#define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007
+#define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008
+#define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009
+#define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a
+#define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b
+#define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c
+#define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP1 0x0110
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP1 0x0111
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP1 0x0112
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP1 0x0113
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP1 0x0114
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP1 0x0115
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP2 0x0120
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP2 0x0121
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP2 0x0122
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP2 0x0123
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP2 0x0124
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP2 0x0125
+
+#define PCI_VENDOR_ID_EF 0x111a
+#define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000
+#define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002
+
+#define PCI_VENDOR_ID_FORE 0x1127
+#define PCI_DEVICE_ID_FORE_PCA200PC 0x0210
+#define PCI_DEVICE_ID_FORE_PCA200E 0x0300
+
+#define PCI_VENDOR_ID_IMAGINGTECH 0x112f
+#define PCI_DEVICE_ID_IMAGINGTECH_ICPCI 0x0000
+
+#define PCI_VENDOR_ID_PHILIPS 0x1131
+#define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146
+
+#define PCI_VENDOR_ID_CYCLONE 0x113c
+#define PCI_DEVICE_ID_CYCLONE_SDK 0x0001
+
+#define PCI_VENDOR_ID_ALLIANCE 0x1142
+#define PCI_DEVICE_ID_ALLIANCE_PROMOTIO 0x3210
+#define PCI_DEVICE_ID_ALLIANCE_PROVIDEO 0x6422
+#define PCI_DEVICE_ID_ALLIANCE_AT24 0x6424
+#define PCI_DEVICE_ID_ALLIANCE_AT3D 0x643d
+
+#define PCI_VENDOR_ID_VMIC 0x114a
+#define PCI_DEVICE_ID_VMIC_VME 0x7587
+
+#define PCI_VENDOR_ID_DIGI 0x114f
+#define PCI_DEVICE_ID_DIGI_EPC 0x0002
+#define PCI_DEVICE_ID_DIGI_RIGHTSWITCH 0x0003
+#define PCI_DEVICE_ID_DIGI_XEM 0x0004
+#define PCI_DEVICE_ID_DIGI_XR 0x0005
+#define PCI_DEVICE_ID_DIGI_CX 0x0006
+#define PCI_DEVICE_ID_DIGI_XRJ 0x0009
+#define PCI_DEVICE_ID_DIGI_EPCJ 0x000a
+#define PCI_DEVICE_ID_DIGI_XR_920 0x0027
+
+#define PCI_VENDOR_ID_MUTECH 0x1159
+#define PCI_DEVICE_ID_MUTECH_MV1000 0x0001
+
+#define PCI_VENDOR_ID_RENDITION 0x1163
+#define PCI_DEVICE_ID_RENDITION_VERITE 0x0001
+#define PCI_DEVICE_ID_RENDITION_VERITE2100 0x2000
+
+#define PCI_VENDOR_ID_TOSHIBA 0x1179
+#define PCI_DEVICE_ID_TOSHIBA_601 0x0601
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f
+
+#define PCI_VENDOR_ID_RICOH 0x1180
+#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466
+
+#define PCI_VENDOR_ID_ARTOP 0x1191
+#define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005
+
+#define PCI_VENDOR_ID_ZEITNET 0x1193
+#define PCI_DEVICE_ID_ZEITNET_1221 0x0001
+#define PCI_DEVICE_ID_ZEITNET_1225 0x0002
+
+#define PCI_VENDOR_ID_OMEGA 0x119b
+#define PCI_DEVICE_ID_OMEGA_82C092G 0x1221
+
+#define PCI_VENDOR_ID_LITEON 0x11ad
+#define PCI_DEVICE_ID_LITEON_LNE100TX 0x0002
+
+#define PCI_VENDOR_ID_NP 0x11bc
+#define PCI_DEVICE_ID_NP_PCI_FDDI 0x0001
+
+#define PCI_VENDOR_ID_ATT 0x11c1
+#define PCI_DEVICE_ID_ATT_L56XMF 0x0440
+
+#define PCI_VENDOR_ID_SPECIALIX 0x11cb
+#define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000
+#define PCI_DEVICE_ID_SPECIALIX_XIO 0x4000
+#define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000
+
+#define PCI_VENDOR_ID_AURAVISION 0x11d1
+#define PCI_DEVICE_ID_AURAVISION_VXP524 0x01f7
+
+#define PCI_VENDOR_ID_IKON 0x11d5
+#define PCI_DEVICE_ID_IKON_10115 0x0115
+#define PCI_DEVICE_ID_IKON_10117 0x0117
+
+#define PCI_VENDOR_ID_ZORAN 0x11de
+#define PCI_DEVICE_ID_ZORAN_36057 0x6057
+#define PCI_DEVICE_ID_ZORAN_36120 0x6120
+
+#define PCI_VENDOR_ID_KINETIC 0x11f4
+#define PCI_DEVICE_ID_KINETIC_2915 0x2915
+
+#define PCI_VENDOR_ID_COMPEX 0x11f6
+#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
+#define PCI_DEVICE_ID_COMPEX_RL2000 0x1401
+
+#define PCI_VENDOR_ID_RP 0x11fe
+#define PCI_DEVICE_ID_RP8OCTA 0x0001
+#define PCI_DEVICE_ID_RP8INTF 0x0002
+#define PCI_DEVICE_ID_RP16INTF 0x0003
+#define PCI_DEVICE_ID_RP32INTF 0x0004
+
+#define PCI_VENDOR_ID_CYCLADES 0x120e
+#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100
+#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101
+#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102
+#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103
+#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104
+#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105
+#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200
+#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201
+
+#define PCI_VENDOR_ID_ESSENTIAL 0x120f
+#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001
+
+#define PCI_VENDOR_ID_O2 0x1217
+#define PCI_DEVICE_ID_O2_6832 0x6832
+
+#define PCI_VENDOR_ID_3DFX 0x121a
+#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
+#define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002
+
+#define PCI_VENDOR_ID_SIGMADES 0x1236
+#define PCI_DEVICE_ID_SIGMADES_6425 0x6401
+
+#define PCI_VENDOR_ID_CCUBE 0x123f
+
+#define PCI_VENDOR_ID_DIPIX 0x1246
+
+#define PCI_VENDOR_ID_STALLION 0x124d
+#define PCI_DEVICE_ID_STALLION_ECHPCI832 0x0000
+#define PCI_DEVICE_ID_STALLION_ECHPCI864 0x0002
+#define PCI_DEVICE_ID_STALLION_EIOPCI 0x0003
+
+#define PCI_VENDOR_ID_OPTIBASE 0x1255
+#define PCI_DEVICE_ID_OPTIBASE_FORGE 0x1110
+#define PCI_DEVICE_ID_OPTIBASE_FUSION 0x1210
+#define PCI_DEVICE_ID_OPTIBASE_VPLEX 0x2110
+#define PCI_DEVICE_ID_OPTIBASE_VPLEXCC 0x2120
+#define PCI_DEVICE_ID_OPTIBASE_VQUEST 0x2130
+
+#define PCI_VENDOR_ID_ASIX 0x125b
+#define PCI_DEVICE_ID_ASIX_88140 0x1400
+
+#define PCI_VENDOR_ID_SATSAGEM 0x1267
+#define PCI_DEVICE_ID_SATSAGEM_PCR2101 0x5352
+#define PCI_DEVICE_ID_SATSAGEM_TELSATTURBO 0x5a4b
+
+#define PCI_VENDOR_ID_ENSONIQ 0x1274
+#define PCI_DEVICE_ID_ENSONIQ_AUDIOPCI 0x5000
+
+#define PCI_VENDOR_ID_PICTUREL 0x12c5
+#define PCI_DEVICE_ID_PICTUREL_PCIVST 0x0081
+
+#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
+#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+
+#define PCI_VENDOR_ID_CBOARDS 0x1307
+#define PCI_DEVICE_ID_CBOARDS_DAS1602_16 0x0001
+
+#define PCI_VENDOR_ID_SYMPHONY 0x1c1c
+#define PCI_DEVICE_ID_SYMPHONY_101 0x0001
+
+#define PCI_VENDOR_ID_TEKRAM 0x1de1
+#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+
+#define PCI_VENDOR_ID_3DLABS 0x3d3d
+#define PCI_DEVICE_ID_3DLABS_300SX 0x0001
+#define PCI_DEVICE_ID_3DLABS_500TX 0x0002
+#define PCI_DEVICE_ID_3DLABS_DELTA 0x0003
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA 0x0004
+#define PCI_DEVICE_ID_3DLABS_MX 0x0006
+
+#define PCI_VENDOR_ID_AVANCE 0x4005
+#define PCI_DEVICE_ID_AVANCE_ALG2064 0x2064
+#define PCI_DEVICE_ID_AVANCE_2302 0x2302
+
+#define PCI_VENDOR_ID_NETVIN 0x4a14
+#define PCI_DEVICE_ID_NETVIN_NV5000SC 0x5000
+
+#define PCI_VENDOR_ID_S3 0x5333
+#define PCI_DEVICE_ID_S3_PLATO_PXS 0x0551
+#define PCI_DEVICE_ID_S3_ViRGE 0x5631
+#define PCI_DEVICE_ID_S3_TRIO 0x8811
+#define PCI_DEVICE_ID_S3_AURORA64VP 0x8812
+#define PCI_DEVICE_ID_S3_TRIO64UVP 0x8814
+#define PCI_DEVICE_ID_S3_ViRGE_VX 0x883d
+#define PCI_DEVICE_ID_S3_868 0x8880
+#define PCI_DEVICE_ID_S3_928 0x88b0
+#define PCI_DEVICE_ID_S3_864_1 0x88c0
+#define PCI_DEVICE_ID_S3_864_2 0x88c1
+#define PCI_DEVICE_ID_S3_964_1 0x88d0
+#define PCI_DEVICE_ID_S3_964_2 0x88d1
+#define PCI_DEVICE_ID_S3_968 0x88f0
+#define PCI_DEVICE_ID_S3_TRIO64V2 0x8901
+#define PCI_DEVICE_ID_S3_PLATO_PXG 0x8902
+#define PCI_DEVICE_ID_S3_ViRGE_DXGX 0x8a01
+#define PCI_DEVICE_ID_S3_ViRGE_GX2 0x8a10
+#define PCI_DEVICE_ID_S3_ViRGE_MX 0x8c01
+#define PCI_DEVICE_ID_S3_ViRGE_MXP 0x8c02
+#define PCI_DEVICE_ID_S3_ViRGE_MXPMV 0x8c03
+#define PCI_DEVICE_ID_S3_SONICVIBES 0xca00
+
+#define PCI_VENDOR_ID_DCI 0x6666
+#define PCI_DEVICE_ID_DCI_PCCOM4 0x0001
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_DEVICE_ID_INTEL_82375 0x0482
+#define PCI_DEVICE_ID_INTEL_82424 0x0483
+#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_82430 0x0486
+#define PCI_DEVICE_ID_INTEL_82434 0x04a3
+#define PCI_DEVICE_ID_INTEL_I960 0x0960
+#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
+#define PCI_DEVICE_ID_INTEL_82092AA_1 0x1222
+#define PCI_DEVICE_ID_INTEL_7116 0x1223
+#define PCI_DEVICE_ID_INTEL_82596 0x1226
+#define PCI_DEVICE_ID_INTEL_82865 0x1227
+#define PCI_DEVICE_ID_INTEL_82557 0x1229
+#define PCI_DEVICE_ID_INTEL_82437 0x122d
+#define PCI_DEVICE_ID_INTEL_82371_0 0x122e
+#define PCI_DEVICE_ID_INTEL_82371_1 0x1230
+#define PCI_DEVICE_ID_INTEL_82371MX 0x1234
+#define PCI_DEVICE_ID_INTEL_82437MX 0x1235
+#define PCI_DEVICE_ID_INTEL_82441 0x1237
+#define PCI_DEVICE_ID_INTEL_82380FB 0x124b
+#define PCI_DEVICE_ID_INTEL_82439 0x1250
+#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
+#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
+#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
+#define PCI_DEVICE_ID_INTEL_82437VX 0x7030
+#define PCI_DEVICE_ID_INTEL_82439TX 0x7100
+#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110
+#define PCI_DEVICE_ID_INTEL_82371AB 0x7111
+#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112
+#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113
+#define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180
+#define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181
+#define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190
+#define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191
+#define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192
+#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71A0
+#define PCI_DEVICE_ID_INTEL_82443GX_1 0x71A1
+#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71A2
+#define PCI_DEVICE_ID_INTEL_P6 0x84c4
+#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5
+
+#define PCI_VENDOR_ID_KTI 0x8e2e
+#define PCI_DEVICE_ID_KTI_ET32P2 0x3000
+
+#define PCI_VENDOR_ID_ADAPTEC 0x9004
+#define PCI_DEVICE_ID_ADAPTEC_7810 0x1078
+#define PCI_DEVICE_ID_ADAPTEC_7821 0x2178
+#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078
+#define PCI_DEVICE_ID_ADAPTEC_7855 0x5578
+#define PCI_DEVICE_ID_ADAPTEC_5800 0x5800
+#define PCI_DEVICE_ID_ADAPTEC_3860 0x6038
+#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075
+#define PCI_DEVICE_ID_ADAPTEC_7860 0x6078
+#define PCI_DEVICE_ID_ADAPTEC_7861 0x6178
+#define PCI_DEVICE_ID_ADAPTEC_7870 0x7078
+#define PCI_DEVICE_ID_ADAPTEC_7871 0x7178
+#define PCI_DEVICE_ID_ADAPTEC_7872 0x7278
+#define PCI_DEVICE_ID_ADAPTEC_7873 0x7378
+#define PCI_DEVICE_ID_ADAPTEC_7874 0x7478
+#define PCI_DEVICE_ID_ADAPTEC_7895 0x7895
+#define PCI_DEVICE_ID_ADAPTEC_7880 0x8078
+#define PCI_DEVICE_ID_ADAPTEC_7881 0x8178
+#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278
+#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378
+#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478
+#define PCI_DEVICE_ID_ADAPTEC_7885 0x8578
+#define PCI_DEVICE_ID_ADAPTEC_7886 0x8678
+#define PCI_DEVICE_ID_ADAPTEC_7887 0x8778
+#define PCI_DEVICE_ID_ADAPTEC_7888 0x8878
+#define PCI_DEVICE_ID_ADAPTEC_1030 0x8b78
+
+#define PCI_VENDOR_ID_ADAPTEC2 0x9005
+#define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010
+#define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011
+#define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013
+#define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f
+#define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050
+#define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051
+#define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f
+#define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080
+#define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081
+#define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083
+#define PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f
+#define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0
+#define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1
+#define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3
+#define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf
+
+#define PCI_VENDOR_ID_ATRONICS 0x907f
+#define PCI_DEVICE_ID_ATRONICS_2015 0x2015
+
+#define PCI_VENDOR_ID_HOLTEK 0x9412
+#define PCI_DEVICE_ID_HOLTEK_6565 0x6565
+
+#define PCI_VENDOR_ID_TIGERJET 0xe159
+#define PCI_DEVICE_ID_TIGERJET_300 0x0001
+
+#define PCI_VENDOR_ID_ARK 0xedd8
+#define PCI_DEVICE_ID_ARK_STING 0xa091
+#define PCI_DEVICE_ID_ARK_STINGARK 0xa099
+#define PCI_DEVICE_ID_ARK_2000MT 0xa0a1
+
+#ifdef __KERNEL__
+/*
+ * The PCI interface treats multi-function devices as independent
+ * devices. The slot/function address of each device is encoded
+ * in a single byte as follows:
+ *
+ * 7:3 = slot
+ * 2:0 = function
+ */
+#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn) ((devfn) & 0x07)
+
+/*
+ * There is one pci_dev structure for each slot-number/function-number
+ * combination:
+ */
+struct pci_dev {
+ struct pci_bus *bus; /* bus this device is on */
+ struct pci_dev *sibling; /* next device on this bus */
+ struct pci_dev *next; /* chain of all devices */
+
+ void *sysdata; /* hook for sys-specific extension */
+
+ unsigned int devfn; /* encoded device & function index */
+ unsigned short vendor; /* PCI vendor ID (see PCI_VENDOR_ID_* above) */
+ unsigned short device; /* PCI device ID (see PCI_DEVICE_ID_* above) */
+ unsigned int class; /* 3 bytes: (base,sub,prog-if) */
+ unsigned int master : 1; /* set if device is master capable */
+ /*
+ * In theory, the irq level can be read from configuration
+ * space and all would be fine. However, old PCI chips don't
+ * support these registers and return 0 instead. For example,
+ * the Vision864-P rev 0 chip can uses INTA, but returns 0 in
+ * the interrupt line and pin registers. pci_init()
+ * initializes this field with the value at PCI_INTERRUPT_LINE
+ * and it is the job of pcibios_fixup() to change it if
+ * necessary. The field must not be 0 unless the device
+ * cannot generate interrupts at all.
+ */
+ unsigned char irq; /* irq generated by this device */
+};
+
+/* One pci_bus per bus segment, linked both hierarchically
+   (parent/children/self) and globally (next).  */
+struct pci_bus {
+ struct pci_bus *parent; /* parent bus this bridge is on */
+ struct pci_bus *children; /* chain of P2P bridges on this bus */
+ struct pci_bus *next; /* chain of all PCI buses */
+
+ struct pci_dev *self; /* bridge device as seen by parent */
+ struct pci_dev *devices; /* devices behind this bridge */
+
+ void *sysdata; /* hook for sys-specific extension */
+
+ unsigned char number; /* bus number */
+ unsigned char primary; /* number of primary bridge */
+ unsigned char secondary; /* number of secondary bridge */
+ unsigned char subordinate; /* max number of subordinate buses */
+};
+
+/*
+ * This is used to map a vendor-id/device-id pair into device-specific
+ * information (a printable name and, for bridges, the bridge type).
+ */
+struct pci_dev_info {
+ unsigned short vendor; /* vendor id */
+ unsigned short device; /* device id */
+
+ const char *name; /* device name */
+ unsigned char bridge_type; /* bridge type or 0xff */
+};
+
+extern struct pci_bus pci_root; /* root bus */
+extern struct pci_dev *pci_devices; /* list of all devices */
+
+
+extern unsigned long pci_init (unsigned long mem_start, unsigned long mem_end);
+
+extern struct pci_dev_info *pci_lookup_dev (unsigned int vendor,
+ unsigned int dev);
+extern const char *pci_strclass (unsigned int class);
+extern const char *pci_strvendor (unsigned int vendor);
+extern const char *pci_strdev (unsigned int vendor, unsigned int device);
+
+extern int get_pci_list (char *buf);
+
+#endif /* __KERNEL__ */
+#endif /* LINUX_PCI_H */
diff --git a/pcnet32/pcnet32.c b/pcnet32/pcnet32.c
new file mode 100644
index 000000000..c9a538615
--- /dev/null
+++ b/pcnet32/pcnet32.c
@@ -0,0 +1,1012 @@
+/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
+/*
+ * Copyright 1996,97 Thomas Bogendoerfer, 1993-1995,1998 Donald Becker
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * Derived from the lance driver written 1993-1995 by Donald Becker.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ * This driver is for AMD PCnet-PCI based ethercards
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <error.h>
+#include <pciaccess.h>
+
+#include "mach_U.h"
+
+#include <sys/io.h>
+#include <hurd.h>
+#include <mach.h>
+#include <cthreads.h>
+
+#include "linux-types.h"
+#include "if_ether.h"
+#include "pci.h"
+#include "netdevice.h"
+#include "skbuff.h"
+#include "bitops.h"
+#include "skbuff.h"
+#include "irq.h"
+#include "util.h"
+
+#include "device_U.h"
+
+static const char *version = "pcnet32.c:v0.99B 4/4/98 DJBecker/TSBogend.\n";
+
+/* A few user-configurable values. */
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
+ * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
+ */
+#define PCNET_LOG_TX_BUFFERS 4
+#define PCNET_LOG_RX_BUFFERS 4
+
+/* Driver verbosity level. 0 = no messages, 7 = wordy death.
+ Modify here, or when loading as a module. */
+static int pcnet32_debug = 1;
+
+/*
+ * Theory of Operation
+ *
+ * This driver uses the same software structure as the normal lance
+ * driver. So look for a verbose description in lance.c. The differences
+ * to the normal lance driver is the use of the 32bit mode of PCnet32
+ * and PCnetPCI chips. Because these chips are 32bit chips, there is no
+ * 16MB limitation and we don't need bounce buffers.
+ */
+
+/*
+ * History:
+ * v0.01: Initial version
+ * only tested on Alpha Noname Board
+ * v0.02: changed IRQ handling for new interrupt scheme (dev_id)
+ * tested on a ASUS SP3G
+ * v0.10: fixed an odd problem with the 79C794 in a Compaq Deskpro XL
+ * looks like the 974 doesn't like stopping and restarting in a
+ * short period of time; now we do a reinit of the lance; the
+ * bug was triggered by doing ifconfig eth0 <ip> broadcast <addr>
+ * and hangs the machine (thanks to Klaus Liedl for debugging)
+ * v0.12: by suggestion from Donald Becker: Renamed driver to pcnet32,
+ * made it standalone (no need for lance.c)
+ * v0.13: added additional PCI detecting for special PCI devices (Compaq)
+ * v0.14: stripped down additional PCI probe (thanks to David C Niemi
+ * and sveneric@xs4all.nl for testing this on their Compaq boxes)
+ * v0.15: added 79C965 (VLB) probe
+ * added interrupt sharing for PCI chips
+ * v0.16: fixed set_multicast_list on Alpha machines
+ * v0.17: removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c
+ * v0.19: changed setting of autoselect bit
+ * v0.20: removed additional Compaq PCI probe; there is now a working one
+ * in arch/i386/bios32.c
+ * v0.21: added endian conversion for ppc, from work by cort@cs.nmt.edu
+ * v0.22: added printing of status to ring dump
+ * v0.23: changed enet_statistics to net_device_stats
+ * v0.99: Changes for 2.0.34 final release. -djb
+ */
+
+void netif_rx (struct sk_buff *skb);
+
+#ifndef __powerpc__
+#define le16_to_cpu(val) (val)
+#define le32_to_cpu(val) (val)
+#endif
+#if (LINUX_VERSION_CODE < 0x20123)
+//#define test_and_set_bit(val, addr) set_bit(val, addr)
+#endif
+
+#define printk(format, ...) do \
+{ \
+ fprintf (stderr , format, ## __VA_ARGS__); \
+ fflush (stderr); \
+} while (0)
+
+// conversion between the virtual address and the physical address
+#define LP_VIRT_TO_BUS(addr,lp) ((vm_address_t) (addr) - (vm_address_t) (lp) + (lp)->paddr)
+#define BUF_VIRT_TO_BUS(addr,lp) ((vm_address_t) (addr) - (lp)->rx_buffs + (lp)->rx_buffs_paddr)
+#define BUF_BUS_TO_VIRT(addr,lp) ((vm_address_t) (addr) - (lp)->rx_buffs_paddr + (lp)->rx_buffs)
+#define SKB_VIRT_TO_BUS(addr,skb) virt_to_phys (addr)
+
+#define eth_copy_and_sum(dest, src, length, base) \
+ memcpy((dest)->data, src, length)
+
+// TODO I hope it is OK.
+#define eth_type_trans(skb,dev) 0
+
+#define TX_RING_SIZE (1 << (PCNET_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((PCNET_LOG_TX_BUFFERS) << 12)
+
+#define RX_RING_SIZE (1 << (PCNET_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((PCNET_LOG_RX_BUFFERS) << 4)
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+enum pcnet_offsets { PCNET32_DATA=0x10, PCNET32_ADDR=0x12, PCNET32_RESET=0x14,
+ PCNET32_BUS_IF=0x16,};
+#define PCNET32_TOTAL_SIZE 0x20
+
+/* The PCNET32 Rx and Tx ring descriptors. */
+struct pcnet32_rx_head {
+ u32 base; /* bus address of the receive buffer (little-endian) */
+ s16 buf_length; /* negative of the buffer size (set to -PKT_BUF_SZ) */
+ s16 status; /* 0x8000 (OWN) set while the descriptor belongs to the chip */
+ u32 msg_length; /* length of the received frame -- written by the chip */
+ u32 reserved;
+};
+
+struct pcnet32_tx_head {
+ u32 base; /* bus address of the packet data (little-endian) */
+ s16 length; /* negative of the packet length (set to -skb->len) */
+ s16 status; /* 0x8300 hands the entry to the chip; 0x4000 flags an error */
+ u32 misc; /* error detail bits, read on Tx error (see interrupt handler) */
+ u32 reserved;
+};
+
+/* The PCNET32 32-Bit initialization block, described in databook. */
+struct pcnet32_init_block {
+ u16 mode; /* operating mode; 0x0003 disables Rx and Tx during setup */
+ u16 tlen_rlen; /* encoded ring sizes: TX_RING_LEN_BITS | RX_RING_LEN_BITS */
+ u8 phys_addr[6]; /* station MAC address (copied from dev->dev_addr) */
+ u16 reserved;
+ u32 filter[2]; /* multicast filter; cleared to 0 by this driver */
+ /* Receive and transmit ring base, along with extra bits. */
+ u32 rx_ring;
+ u32 tx_ring;
+};
+
+/* Per-card driver state.  The whole structure lives in DMA-able memory
+   (allocated with vm_dma_buff_alloc) so the rings inside it can be
+   handed to the chip via LP_VIRT_TO_BUS.  */
+struct pcnet32_private {
+ /* The Tx and Rx ring entries must be aligned on 16-byte boundaries
+ in 32bit mode. */
+ struct pcnet32_rx_head rx_ring[RX_RING_SIZE];
+ struct pcnet32_tx_head tx_ring[TX_RING_SIZE];
+ struct pcnet32_init_block init_block;
+ const char *name; /* chip name looked up in chip_table */
+ struct device *next_module; /* link in the list of probed devices */
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
+ vm_address_t rx_buffs_paddr; /* The physical address of the buffers above */
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ struct enet_statistics stats;
+ char tx_full; /* set when the Tx ring has no free entry */
+ unsigned long lock; /* Tx-path lock, taken with test_and_set_bit() */
+
+ /* The physical address of the structure, used by LP_VIRT_TO_BUS
+ to translate ring addresses for the chip. */
+ vm_address_t paddr;
+};
+
+/* Known PCnet part IDs (read from chip registers 88/89 in
+   pcnet32_probe1) mapped to printable names.  The zero entry is the
+   sentinel/fallback used when no ID matches. */
+static struct pcnet_chip_type {
+ int id_number;
+ const char *name;
+ int flags;
+} chip_table[] = {
+ {0x2420, "PCnet/PCI 79C970", 0},
+ {0x2430, "PCnet32", 0},
+ {0x2621, "PCnet/PCI II 79C970A", 0},
+ {0x2623, "PCnet/FAST 79C971", 0},
+ {0x2624, "PCnet/FAST+ 79C972", 0},
+ {0x0, "PCnet32 (unknown)", 0},
+};
+
+/* Index of functions. */
+int pcnet32_probe(struct device *dev);
+void pcnet32_interrupt(int irq);
+static int pcnet32_probe1(struct device *dev, unsigned int ioaddr, unsigned char irq_line);
+static int pcnet32_open(struct device *dev);
+static void pcnet32_init_ring(struct device *dev);
+static int pcnet32_start_xmit(struct sk_buff *skb, struct device *dev);
+static int pcnet32_rx(struct device *dev);
+static int pcnet32_close(struct device *dev);
+static struct enet_statistics *pcnet32_get_stats(struct device *dev);
+static void pcnet32_set_multicast_list(struct device *dev);
+
+struct device * init_etherdev(struct device *dev, int sizeof_priv);
+
+mach_port_t master_device;
+mach_port_t priv_host;
+
+struct device *ether_dev;
+
+struct mutex global_lock = MUTEX_INITIALIZER;
+struct mutex skb_queue_lock = MUTEX_INITIALIZER;
+
+
+/* A list of all installed PCnet32 devices, for removing the driver module. */
+static struct device *root_pcnet32_dev = NULL;
+
+/* Stub kept for source compatibility with the Linux driver: always
+   reports the I/O port region as free (no bookkeeping is done in this
+   user-level program). */
+int check_region(unsigned int from, unsigned int num)
+{
+ // check the ioport region before probing
+ // it isn't needed for this test program.
+ return 0;
+}
+
+/* Stub: I/O region reservation is a no-op in this user-level driver. */
+void request_region(unsigned int from, unsigned int num, const char *name)
+{
+}
+
+/* Scan the PCI bus (via libpciaccess) for AMD PCnet/LANCE cards.
+   For each match: read its interrupt line and I/O BAR, make sure bus
+   mastering and I/O access are enabled in PCI_COMMAND, then hand the
+   card to pcnet32_probe1().  Returns 0 if at least one card was found,
+   -ENODEV otherwise.  */
+int pcnet32_probe (struct device *dev)
+{
+ int cards_found = 0;
+ error_t err;
+ struct pci_device *pci_dev;
+ struct pci_device_iterator *dev_iter;
+
+ /* A NULL slot match iterates over every PCI device in the system. */
+ dev_iter = pci_slot_match_iterator_create (NULL);
+ while ((pci_dev = pci_device_next (dev_iter)) != NULL) {
+ u8 irq_line;
+ u16 pci_command, new_command;
+ u32 pci_ioaddr;
+
+ if (pci_dev->vendor_id != PCI_VENDOR_ID_AMD)
+ continue;
+ if (pci_dev->device_id != PCI_DEVICE_ID_AMD_LANCE)
+ continue;
+
+ err = pci_device_cfg_read_u8 (pci_dev, &irq_line,
+ PCI_INTERRUPT_LINE);
+ if (err) {
+ error (0, err, "pci_device_cfg_read");
+ break;
+ }
+
+ err = pci_device_cfg_read_u32 (pci_dev, &pci_ioaddr,
+ PCI_BASE_ADDRESS_0);
+ if (err) {
+ error (0, err, "pci_device_cfg_read");
+ break;
+ }
+ /* Remove I/O space marker in bit 0. */
+ pci_ioaddr &= ~3;
+
+ /* Avoid already found cards from previous pcnet32_probe() calls */
+ if (check_region(pci_ioaddr, PCNET32_TOTAL_SIZE))
+ continue;
+
+ /* Activate the card: fix for brain-damaged Win98 BIOSes. */
+ err = pci_device_cfg_read_u16 (pci_dev, &pci_command,
+ PCI_COMMAND);
+ if (err) {
+ error (0, err, "pci_device_cfg_read");
+ break;
+ }
+ new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
+ if (pci_command != new_command) {
+ printk(" The PCI BIOS has not enabled the AMD Ethernet"
+ " device at %2x-%2x."
+ " Updating PCI command %4.4x->%4.4x.\n",
+ pci_dev->bus, pci_dev->func,
+ pci_command, new_command);
+ err = pci_device_cfg_write_u16 (pci_dev, new_command,
+ PCI_COMMAND);
+ if (err) {
+ error (0, err, "pci_device_cfg_write");
+ break;
+ }
+ }
+
+ if (pcnet32_probe1(dev, pci_ioaddr, irq_line) != 0) {
+ /* Should never happen. */
+ printk("pcnet32.c: Probe of PCI card at %#x failed.\n",
+ pci_ioaddr);
+ } else
+ /* NOTE(review): after a successful probe, dev is cleared so that
+ init_etherdev() presumably allocates a fresh device structure
+ for any further cards -- confirm against init_etherdev.  */
+ dev = 0;
+ cards_found++;
+ }
+ pci_iterator_destroy (dev_iter);
+
+ return cards_found ? 0 : -ENODEV;
+}
+
+
+/* pcnet32_probe1 -- bring up one candidate card at IOADDR.
+   Verifies that a PCnet chip is really there, identifies the chip
+   version, allocates the DMA-able private state and Rx buffers,
+   programs the init-block address into the chip, and fills in the
+   device methods.  Returns 0 on success, ENODEV if no (supported)
+   chip is found at IOADDR.  */
+static int pcnet32_probe1(struct device *dev, unsigned int ioaddr, unsigned char irq_line)
+{
+ struct pcnet32_private *lp;
+ int i;
+ const char *chipname;
+ vm_address_t lp_vaddr;
+ vm_address_t lp_paddr;
+ vm_address_t buf_vaddr;
+ vm_address_t buf_paddr;
+ error_t err;
+
+ /* Make all io ports accessible. */
+ if (ioperm (ioaddr, PCNET32_TOTAL_SIZE, 1) < 0)
+ return ENODEV;
+
+ /* check if there is really a pcnet chip on that ioaddr */
+ /* (the address PROM carries 0x57 0x57 at offsets 14/15) */
+ if ((inb(ioaddr + 14) != 0x57) || (inb(ioaddr + 15) != 0x57))
+ return ENODEV;
+
+ inw(ioaddr+PCNET32_RESET); /* Reset the PCNET32 */
+
+ outw(0x0000, ioaddr+PCNET32_ADDR); /* Switch to window 0 */
+ /* After reset, CSR0 is expected to read exactly 0x0004 (STOP set). */
+ if (inw(ioaddr+PCNET32_DATA) != 0x0004)
+ return ENODEV;
+
+ /* Get the version of the chip. */
+ outw(88, ioaddr+PCNET32_ADDR);
+ if (inw(ioaddr+PCNET32_ADDR) != 88) {
+ /* should never happen */
+ return ENODEV;
+ } else { /* Good, it's a newer chip. */
+ /* The 32-bit part ID is split across registers 88 (low) and
+ 89 (high); the low nibble-triplet must be 0x003 and the
+ 16-bit field above it selects the chip_table entry. */
+ int chip_version = inw(ioaddr+PCNET32_DATA);
+ outw(89, ioaddr+PCNET32_ADDR);
+ chip_version |= inw(ioaddr+PCNET32_DATA) << 16;
+ if (pcnet32_debug > 2)
+ printk(" PCnet chip version is %#x.\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003)
+ return ENODEV;
+ chip_version = (chip_version >> 12) & 0xffff;
+ for (i = 0; chip_table[i].id_number; i++)
+ if (chip_table[i].id_number == chip_version)
+ break;
+ chipname = chip_table[i].name;
+ }
+
+ dev = init_etherdev(dev, 0);
+ ether_dev = dev; /* remembered globally for the interrupt handler */
+
+ printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
+
+ /* There is a 16 byte station address PROM at the base address.
+ The first six bytes are the station address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ printk("\n");
+
+ dev->base_addr = ioaddr;
+ request_region(ioaddr, PCNET32_TOTAL_SIZE, dev->name);
+
+ /* Data structures used by the PCnet32 are 16byte aligned and DMAble. */
+ err = vm_dma_buff_alloc (priv_host, mach_task_self (),
+ sizeof(*lp)+15, &lp_vaddr, &lp_paddr);
+ if (err)
+ debug ("cannot allocate memory for the DMA buffer.");
+ lp = (struct pcnet32_private *) lp_vaddr;
+ // (((unsigned long)kmalloc(sizeof(*lp)+15, GFP_DMA | GFP_KERNEL)+15) & ~15);
+
+ memset(lp, 0, sizeof(*lp));
+ dev->priv = lp;
+ lp->paddr = lp_paddr; /* needed by LP_VIRT_TO_BUS below */
+
+ lp->next_module = root_pcnet32_dev;
+ root_pcnet32_dev = dev;
+
+ lp->name = chipname;
+
+ /* One DMA-able region holds all RX_RING_SIZE packet buffers. */
+ err = vm_dma_buff_alloc (priv_host, mach_task_self (),
+ PKT_BUF_SZ*RX_RING_SIZE, &buf_vaddr, &buf_paddr);
+ if (err)
+ debug ("cannot allocate memory for the DMA buffer.");
+ lp->rx_buffs = buf_vaddr;
+ lp->rx_buffs_paddr = buf_paddr;
+// lp->rx_buffs = (unsigned long) kmalloc(PKT_BUF_SZ*RX_RING_SIZE, GFP_DMA | GFP_KERNEL);
+
+ lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
+ lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+// lp->init_block.rx_ring = (u32)le32_to_cpu(virt_to_bus(lp->rx_ring));
+// lp->init_block.tx_ring = (u32)le32_to_cpu(virt_to_bus(lp->tx_ring));
+ lp->init_block.rx_ring = (u32)le32_to_cpu(LP_VIRT_TO_BUS (lp->rx_ring, lp));
+ lp->init_block.tx_ring = (u32)le32_to_cpu(LP_VIRT_TO_BUS (lp->tx_ring, lp));
+
+ /* switch pcnet32 to 32bit mode */
+ outw(0x0014, ioaddr+PCNET32_ADDR);
+ outw(0x0002, ioaddr+PCNET32_BUS_IF);
+
+ /* Program the init block's bus address: low 16 bits into register 1,
+ high 16 bits into register 2. */
+ outw(0x0001, ioaddr+PCNET32_ADDR);
+ inw(ioaddr+PCNET32_ADDR);
+ outw(LP_VIRT_TO_BUS(&lp->init_block, lp) & 0xffff, ioaddr+PCNET32_DATA);
+ outw(0x0002, ioaddr+PCNET32_ADDR);
+ inw(ioaddr+PCNET32_ADDR);
+ outw(LP_VIRT_TO_BUS(&lp->init_block, lp) >> 16, ioaddr+PCNET32_DATA);
+ outw(0x0000, ioaddr+PCNET32_ADDR);
+ inw(ioaddr+PCNET32_ADDR);
+
+ dev->irq = irq_line;
+
+ if (pcnet32_debug > 0)
+ printk(version);
+
+ /* The PCNET32-specific entries in the device structure. */
+ dev->open = &pcnet32_open;
+ dev->hard_start_xmit = &pcnet32_start_xmit;
+ dev->stop = &pcnet32_close;
+ dev->get_stats = &pcnet32_get_stats;
+ dev->set_multicast_list = &pcnet32_set_multicast_list;
+
+ /* Ask the Mach kernel to deliver this card's interrupt line. */
+ device_irq_enable (master_device, irq_line, TRUE);
+ /* Fill in the generic fields of the device structure. */
+ ether_setup(dev);
+ return 0;
+}
+
+
+/* Open the interface: hook up the interrupt handler, reset the chip,
+   select 32-bit mode and media autoselect, rebuild the rings, then
+   re-run chip initialization and start it.  Always returns 0 once the
+   IRQ was successfully requested (-EAGAIN otherwise).  */
+static int
+pcnet32_open(struct device *dev)
+{
+#define SA_SHIRQ 0x04000000
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ if (dev->irq == 0 ||
+ request_irq(dev, &pcnet32_interrupt, SA_SHIRQ)) {
+ return -EAGAIN;
+ }
+
+ /* Reset the PCNET32 */
+ inw(ioaddr+PCNET32_RESET);
+
+ /* switch pcnet32 to 32bit mode */
+ outw(0x0014, ioaddr+PCNET32_ADDR);
+ outw(0x0002, ioaddr+PCNET32_BUS_IF);
+
+ /* Turn on auto-select of media (AUI, BNC). */
+ outw(0x0002, ioaddr+PCNET32_ADDR);
+ /* only touch autoselect bit */
+ outw(inw(ioaddr+PCNET32_BUS_IF) | 0x0002, ioaddr+PCNET32_BUS_IF);
+
+ if (pcnet32_debug > 1)
+ printk("%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq,
+ (u32) LP_VIRT_TO_BUS(lp->tx_ring, lp),
+ (u32) LP_VIRT_TO_BUS(lp->rx_ring, lp),
+ (u32) LP_VIRT_TO_BUS(&lp->init_block, lp));
+
+ /* check for ATLAS T1/E1 LAW card (recognized by its MAC OUI) */
+ if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 && dev->dev_addr[2] == 0x75) {
+ /* select GPSI mode */
+ lp->init_block.mode = 0x0100;
+ outw(0x0002, ioaddr+PCNET32_ADDR);
+ outw(inw(ioaddr+PCNET32_BUS_IF) & ~2, ioaddr+PCNET32_BUS_IF);
+ /* switch full duplex on */
+ outw(0x0009, ioaddr+PCNET32_ADDR);
+ outw(inw(ioaddr+PCNET32_BUS_IF) | 1, ioaddr+PCNET32_BUS_IF);
+ } else
+ lp->init_block.mode = 0x0000;
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ pcnet32_init_ring(dev);
+
+ /* Re-initialize the PCNET32, and start it when done.
+ Registers 1/2 take the low/high halves of the init block address. */
+ outw(0x0001, ioaddr+PCNET32_ADDR);
+ outw(LP_VIRT_TO_BUS(&lp->init_block, lp) &0xffff, ioaddr+PCNET32_DATA);
+ outw(0x0002, ioaddr+PCNET32_ADDR);
+ outw(LP_VIRT_TO_BUS(&lp->init_block, lp) >> 16, ioaddr+PCNET32_DATA);
+
+ outw(0x0004, ioaddr+PCNET32_ADDR);
+ outw(0x0915, ioaddr+PCNET32_DATA);
+
+ /* CSR0 = 0x0001: kick off chip initialization from the init block. */
+ outw(0x0000, ioaddr+PCNET32_ADDR);
+ outw(0x0001, ioaddr+PCNET32_DATA);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ i = 0;
+ /* Busy-wait (bounded) for the InitDone bit, 0x0100. */
+ while (i++ < 100)
+ if (inw(ioaddr+PCNET32_DATA) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ /* Start the chip with interrupts enabled (CSR0 = 0x0042). */
+ outw(0x0042, ioaddr+PCNET32_DATA);
+
+ if (pcnet32_debug > 2)
+ printk("%s: PCNET32 open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i, (u32) LP_VIRT_TO_BUS(&lp->init_block, lp), inw(ioaddr+PCNET32_DATA));
+
+ return 0; /* Always succeed */
+}
+
+/*
+ * The LANCE has been halted for one reason or another (busmaster memory
+ * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ * etc.). Modern LANCE variants always reload their ring-buffer
+ * configuration when restarted, so we must reinitialize our ring
+ * context before restarting. As part of this reinitialization,
+ * find all packets still on the Tx ring and pretend that they had been
+ * sent (in effect, drop the packets on the floor) - the higher-level
+ * protocols will time out and retransmit. It'd be better to shuffle
+ * these skbs to a temp list and then actually re-Tx them after
+ * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+ */
+
+/* Free every skb still queued on the Tx ring (dropping the packets);
+   called before the ring is rebuilt on a chip restart -- see the long
+   comment above.  */
+static void
+pcnet32_purge_tx_ring(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (lp->tx_skbuff[i]) {
+ dev_kfree_skb(lp->tx_skbuff[i], FREE_WRITE);
+ lp->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+
+/* Initialize the PCNET32 Rx and Tx rings: reset all ring indices and
+   flags, hand every Rx descriptor (with its slice of the rx_buffs DMA
+   region) to the chip, clear the Tx descriptors, and refresh the init
+   block with the ring addresses and station address. */
+static void
+pcnet32_init_ring(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ int i;
+
+ lp->lock = 0, lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_ring[i].base = (u32)le32_to_cpu(BUF_VIRT_TO_BUS((char *)lp->rx_buffs + i*PKT_BUF_SZ, lp));
+ /* buf_length is the negative of the buffer size; status 0x8000
+ (OWN) gives the descriptor to the chip. */
+ lp->rx_ring[i].buf_length = le16_to_cpu(-PKT_BUF_SZ);
+ lp->rx_ring[i].status = le16_to_cpu(0x8000);
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ the upper ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_ring[i].base = 0;
+ lp->tx_ring[i].status = 0;
+ }
+
+ lp->init_block.tlen_rlen = TX_RING_LEN_BITS | RX_RING_LEN_BITS;
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.rx_ring = (u32)le32_to_cpu(LP_VIRT_TO_BUS(lp->rx_ring, lp));
+ lp->init_block.tx_ring = (u32)le32_to_cpu(LP_VIRT_TO_BUS(lp->tx_ring, lp));
+}
+
+/* Restart the chip after an error: drop pending Tx packets, rebuild
+   the rings, re-run ring initialization (CSR0 bit 0x0001), wait
+   (bounded) for InitDone (0x0100), then write CSR0_BITS to set the
+   chip running again.
+   NOTE(review): the must_reinit parameter is currently unused.  */
+static void
+pcnet32_restart(struct device *dev, unsigned int csr0_bits, int must_reinit)
+{
+ int i;
+ unsigned int ioaddr = dev->base_addr;
+
+ pcnet32_purge_tx_ring(dev);
+ pcnet32_init_ring(dev);
+
+ outw(0x0000, ioaddr + PCNET32_ADDR);
+ /* ReInit Ring */
+ outw(0x0001, ioaddr + PCNET32_DATA);
+ i = 0;
+ while (i++ < 100)
+ if (inw(ioaddr+PCNET32_DATA) & 0x0100)
+ break;
+
+ outw(csr0_bits, ioaddr + PCNET32_DATA);
+}
+
+/* Queue one packet for transmission.  Handles a watchdog restart if
+   the transmitter is stuck (dev->tbusy), then fills the next Tx ring
+   entry and pokes the chip to start sending.  Returns 0 on success,
+   1 if the caller should retry later (transmitter busy/locked).  */
+static int
+pcnet32_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ unsigned int ioaddr = dev->base_addr;
+ int entry;
+// unsigned long flags;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ // TODO is it OK to simply comment them?
+// int tickssofar = jiffies - dev->trans_start;
+// if (tickssofar < 20)
+// return 1;
+ outw(0, ioaddr+PCNET32_ADDR);
+ printk("%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, inw(ioaddr+PCNET32_DATA));
+ /* CSR0 = 0x0004: stop the chip before dumping/restarting. */
+ outw(0x0004, ioaddr+PCNET32_DATA);
+ lp->stats.tx_errors++;
+#ifndef final_version
+ {
+ int i;
+ printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0 ; i < RX_RING_SIZE; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
+ lp->rx_ring[i].msg_length, (unsigned)lp->rx_ring[i].status);
+ for (i = 0 ; i < TX_RING_SIZE; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ lp->tx_ring[i].base, -lp->tx_ring[i].length,
+ lp->tx_ring[i].misc, (unsigned)lp->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+ pcnet32_restart(dev, 0x0042, 1);
+
+ dev->tbusy = 0;
+ // TODO maybe I should uncomment it later.
+// dev->trans_start = jiffies;
+
+ return 0;
+ }
+
+ if (pcnet32_debug > 3) {
+ outw(0x0000, ioaddr+PCNET32_ADDR);
+ printk("%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", dev->name,
+ inw(ioaddr+PCNET32_DATA));
+ outw(0x0000, ioaddr+PCNET32_DATA);
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+
+ /* Serialize against the Tx-done path using lp->lock. */
+ if (test_and_set_bit(0, (void*)&lp->lock) != 0) {
+ if (pcnet32_debug > 0)
+ printk("%s: tx queue lock!.\n", dev->name);
+ /* don't clear dev->tbusy flag. */
+ return 1;
+ }
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+ /* Caution: the write order is important here, set the base address
+ with the "ownership" bits last. */
+
+ /* The chip takes the packet length as a negative value. */
+ lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
+
+ lp->tx_ring[entry].misc = 0x00000000;
+
+ /* Remember the skb so the Tx-done path can free it. */
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = (u32)le32_to_cpu(SKB_VIRT_TO_BUS(skb->data, skb));
+ lp->tx_ring[entry].status = le16_to_cpu(0x8300);
+
+ lp->cur_tx++;
+
+ /* Trigger an immediate send poll. */
+ outw(0x0000, ioaddr+PCNET32_ADDR);
+ outw(0x0048, ioaddr+PCNET32_DATA);
+
+// dev->trans_start = jiffies;
+
+ /* global_lock replaces the kernel's save_flags()/cli() critical
+ section in this user-level port. */
+// save_flags(flags);
+// cli();
+ mutex_lock (&global_lock);
+ lp->lock = 0;
+ if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
+ clear_bit(0, (void*)&dev->tbusy);
+ else
+ lp->tx_full = 1;
+// restore_flags(flags);
+ mutex_unlock (&global_lock);
+
+ return 0;
+}
+
+/* The PCNET32 interrupt handler.  Reads CSR0 in a loop (bounded by
+   max_interrupt_work), acknowledges the pending sources, dispatches
+   Rx and Tx-done processing, restarts the chip on fatal errors, and
+   finally re-enables the interrupt line through the Mach kernel.  */
+void
+pcnet32_interrupt(int irq)
+{
+ extern mach_port_t master_device;
+ error_t err;
+ struct device *dev = ether_dev;
+ struct pcnet32_private *lp;
+ unsigned int csr0, ioaddr;
+ int boguscnt = max_interrupt_work;
+ int must_restart;
+
+ if (dev == NULL) {
+ printk ("pcnet32_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = (struct pcnet32_private *)dev->priv;
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = 1;
+
+ /* Select CSR0 and loop while any interesting bit (0x8600) is set. */
+ outw(0x00, dev->base_addr + PCNET32_ADDR);
+ while ((csr0 = inw(dev->base_addr + PCNET32_DATA)) & 0x8600
+ && --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & ~0x004f, dev->base_addr + PCNET32_DATA);
+
+ must_restart = 0;
+
+ if (pcnet32_debug > 5)
+ printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, inw(dev->base_addr + PCNET32_DATA));
+
+ if (csr0 & 0x0400) { /* Rx interrupt */
+ pcnet32_rx(dev);
+ }
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ /* global_lock serializes against pcnet32_start_xmit. */
+ mutex_lock (&global_lock);
+ int dirty_tx = lp->dirty_tx;
+
+ /* Reap every descriptor the chip has finished with. */
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x4000) {
+ /* There was an major error, log it. */
+ int err_status = le16_to_cpu(lp->tx_ring[entry].misc);
+ lp->stats.tx_errors++;
+ if (err_status & 0x04000000) lp->stats.tx_aborted_errors++;
+ if (err_status & 0x08000000) lp->stats.tx_carrier_errors++;
+ if (err_status & 0x10000000) lp->stats.tx_window_errors++;
+ if (err_status & 0x40000000) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ lp->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ printk("%s: Tx FIFO error! Status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+ } else {
+ if (status & 0x1800)
+ lp->stats.collisions++;
+ lp->stats.tx_packets++;
+ }
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
+ lp->tx_skbuff[entry] = 0;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (lp->tx_full && dev->tbusy
+ && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ clear_bit(0, (void*)&dev->tbusy);
+ // TODO comment it temporarily
+// mark_bh(NET_BH);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ mutex_unlock (&global_lock);
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) {
+ /*
+ * this happens when our receive ring is full. This
+ * shouldn't be a problem as we will see normal rx
+ * interrupts for the frames in the receive ring. But
+ * there are some PCI chipsets (I can reproduce this
+ * on SP3G with Intel saturn chipset) which have some-
+ * times problems and will fill up the receive ring
+ * with error descriptors. In this situation we don't
+ * get a rx interrupt, but a missed frame interrupt
+ * sooner or later. So we try to clean up our receive
+ * ring here.
+ */
+ pcnet32_rx(dev);
+ lp->stats.rx_errors++; /* Missed a Rx frame. */
+ }
+ if (csr0 & 0x0800) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+
+ if (must_restart) {
+ /* stop the chip to clear the error condition, then restart */
+ outw(0x0000, dev->base_addr + PCNET32_ADDR);
+ outw(0x0004, dev->base_addr + PCNET32_DATA);
+ pcnet32_restart(dev, 0x0002, 0);
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ outw(0x0000, dev->base_addr + PCNET32_ADDR);
+ outw(0x7940, dev->base_addr + PCNET32_DATA);
+
+ if (pcnet32_debug > 4)
+ printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
+ dev->name, inw(ioaddr + PCNET32_ADDR),
+ inw(dev->base_addr + PCNET32_DATA));
+
+ dev->interrupt = 0;
+ /* Tell the Mach kernel to re-enable (ack) this interrupt line.
+ NOTE(review): the returned err is never checked -- consider
+ logging a failure here. */
+ err = device_irq_enable (master_device, irq, TRUE);
+ return;
+}
+
+/* pcnet32_rx: drain the receive ring.
+ Walks descriptors from lp->cur_rx while the ownership bit (0x8000 in the
+ status word, tested via the sign of the 16-bit value) is clear, i.e. while
+ the chip has handed the buffer back to us. Good frames are copied into a
+ freshly allocated skb and passed to netif_rx(); errored or dropped frames
+ only update statistics. Each consumed descriptor is re-armed for the
+ chip. Always returns 0. */
+static int
+pcnet32_rx(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
+ int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
+
+ if (status != 0x03) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & 0x01) /* Only count a general error at the */
+ lp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x20) lp->stats.rx_frame_errors++;
+ if (status & 0x10) lp->stats.rx_over_errors++;
+ if (status & 0x08) lp->stats.rx_crc_errors++;
+ if (status & 0x04) lp->stats.rx_fifo_errors++;
+ /* Clear the error bits; the descriptor is re-armed below. */
+ lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
+ }
+ else
+ {
+ /* Malloc up new buffer, compatible with net-2e. */
+ short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)-4;
+ struct sk_buff *skb;
+
+ if(pkt_len < 60) {
+ printk("%s: Runt packet!\n",dev->name);
+ lp->stats.rx_errors++;
+ } else {
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ /* Scan ahead: if nearly the whole ring is still owned by
+ us the ring is about to wedge, so drop this frame to
+ make progress; otherwise leave it to be retried on a
+ later interrupt. */
+ for (i=0; i < RX_RING_SIZE; i++)
+ if ((short)le16_to_cpu(lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].status) < 0)
+ break;
+
+ if (i > RX_RING_SIZE -2)
+ {
+ lp->stats.rx_dropped++;
+ lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2); /* 16 byte align */
+ skb_put(skb,pkt_len); /* Make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *)BUF_BUS_TO_VIRT(le32_to_cpu(lp->rx_ring[entry].base), lp),
+ pkt_len,0);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ }
+ /* The docs say that the buffer length isn't touched, but Andrew Boyd
+ of QNX reports that some revs of the 79C965 clear it. */
+ lp->rx_ring[entry].buf_length = le16_to_cpu(-PKT_BUF_SZ);
+ /* Hand the descriptor back to the chip (set the ownership bit). */
+ lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+/* pcnet32_close: shut the interface down.
+ Marks the device stopped and busy, latches the missed-frame count from
+ CSR112 into the statistics, writes the STOP command (0x0004) to CSR0 so
+ the chip stops touching our rings, and releases the interrupt line.
+ Returns 0. */
+static int
+pcnet32_close(struct device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+
+ dev->start = 0;
+ set_bit(0, (void*)&dev->tbusy);
+
+ /* CSR112 holds the missed-frame counter; snapshot it before stopping. */
+ outw(112, ioaddr+PCNET32_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+PCNET32_DATA);
+
+ outw(0, ioaddr+PCNET32_ADDR);
+
+ if (pcnet32_debug > 1)
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(ioaddr+PCNET32_DATA));
+
+ /* We stop the PCNET32 here -- it occasionally polls
+ memory if we don't. */
+ outw(0x0004, ioaddr+PCNET32_DATA);
+
+ free_irq(dev);
+
+ return 0;
+}
+
+/* pcnet32_get_stats: return the accumulated interface statistics.
+ Refreshes rx_missed_errors from CSR112 under global_lock (the mutex
+ replaces the old cli/sti blackout kept in comments), restoring the
+ previously selected register address so a concurrent register user is
+ not disturbed. */
+static struct enet_statistics *pcnet32_get_stats(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ unsigned int ioaddr = dev->base_addr;
+ unsigned short saved_addr;
+// unsigned long flags;
+
+// save_flags(flags);
+// cli();
+ mutex_lock (&global_lock);
+ saved_addr = inw(ioaddr+PCNET32_ADDR);
+ outw(112, ioaddr+PCNET32_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+PCNET32_DATA);
+ outw(saved_addr, ioaddr+PCNET32_ADDR);
+ mutex_unlock (&global_lock);
+// restore_flags(flags);
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+
+/* pcnet32_set_multicast_list: reprogram the receive address filter.
+ Promiscuous mode sets the PROM bit (0x8000) in the init-block mode word;
+ otherwise the logical-address filter is set to all-ones whenever any
+ multicast reception is wanted (no per-address hashing -- see FIXIT
+ below). The chip is stopped and restarted so the new init block is
+ reloaded. */
+static void pcnet32_set_multicast_list(struct device *dev)
+{
+#define IFF_PROMISC 0x100
+#define IFF_ALLMULTI 0x200
+ unsigned int ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+
+ if (dev->flags&IFF_PROMISC) {
+ /* Log any net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ lp->init_block.mode |= 0x8000;
+ } else {
+ int num_addrs=dev->mc_count;
+ if(dev->flags&IFF_ALLMULTI)
+ num_addrs=1;
+ /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
+ memset(lp->init_block.filter , (num_addrs == 0) ? 0 : -1, sizeof(lp->init_block.filter));
+ lp->init_block.mode &= ~0x8000;
+ }
+
+ outw(0, ioaddr+PCNET32_ADDR);
+ outw(0x0004, ioaddr+PCNET32_DATA); /* Temporarily stop the lance. */
+
+ pcnet32_restart(dev, 0x0042, 0); /* Resume normal operation */
+
+}
+
+/* ioperm_ports: always request raw I/O port access for this task.
+ NOTE(review): presumably queried by the driver's startup code to decide
+ whether to enable i386 I/O permissions -- confirm against main.c. */
+boolean_t
+ioperm_ports ()
+{
+ return TRUE;
+}
diff --git a/pcnet32/pcnet32.prof_d b/pcnet32/pcnet32.prof_d
new file mode 100644
index 000000000..3946e20f5
--- /dev/null
+++ b/pcnet32/pcnet32.prof_d
@@ -0,0 +1 @@
+pcnet32.prof: pcnet32_p.o net_init_p.o deviceUser_p.o machUser_p.o irq_p.o net_p.o main_p.o ds_routines_p.o queue_p.o device_replyUser_p.o deviceServer_p.o notifyServer_p.o kmem_p.o ../libthreads/libthreads_p.a ../libports/libports_p.a ../libfshelp/libfshelp_p.a ../libshouldbeinlibc/libshouldbeinlibc_p.a ../libtrivfs/libtrivfs_p.a
diff --git a/pcnet32/queue.c b/pcnet32/queue.c
new file mode 100644
index 000000000..a43a21b0e
--- /dev/null
+++ b/pcnet32/queue.c
@@ -0,0 +1,131 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Routines to implement queue package.
+ */
+
+#include "queue.h"
+
+
+
+/*
+ * Insert element at head of queue.
+ */
+/* Link ELT immediately after the queue header QUE (head of queue). */
+void enqueue_head(
+ register queue_t que,
+ register queue_entry_t elt)
+{
+ elt->next = que->next;
+ elt->prev = que;
+ elt->next->prev = elt;
+ que->next = elt;
+}
+
+/*
+ * Insert element at tail of queue.
+ */
+/* Link ELT immediately before the queue header QUE (tail of queue). */
+void enqueue_tail(
+ register queue_t que,
+ register queue_entry_t elt)
+{
+ elt->next = que;
+ elt->prev = que->prev;
+ elt->prev->next = elt;
+ que->prev = elt;
+}
+
+/*
+ * Remove and return element at head of queue.
+ */
+/* Unlink and return the first element of QUE, or NULL if QUE is empty
+ (an empty queue's header points to itself). */
+queue_entry_t dequeue_head(
+ register queue_t que)
+{
+ register queue_entry_t elt;
+
+ if (que->next == que)
+ return((queue_entry_t)0);
+
+ elt = que->next;
+ elt->next->prev = que;
+ que->next = elt->next;
+ return(elt);
+}
+
+/*
+ * Remove and return element at tail of queue.
+ */
+/* Unlink and return the last element of QUE, or NULL if QUE is empty. */
+queue_entry_t dequeue_tail(
+ register queue_t que)
+{
+ register queue_entry_t elt;
+
+ if (que->prev == que)
+ return((queue_entry_t)0);
+
+ elt = que->prev;
+ elt->prev->next = que;
+ que->prev = elt->prev;
+ return(elt);
+}
+
+/*
+ * Remove arbitrary element from queue.
+ * Does not check whether element is on queue - the world
+ * will go haywire if it isn't.
+ */
+
+/*ARGSUSED*/
+/* Unlink ELT from the queue it is on. The queue argument is unused;
+ ELT must actually be linked, or the lists are corrupted (see the
+ warning above). */
+void remqueue(
+ queue_t que,
+ register queue_entry_t elt)
+{
+ elt->next->prev = elt->prev;
+ elt->prev->next = elt->next;
+}
+
+/*
+ * Routines to directly imitate the VAX hardware queue
+ * package.
+ */
+/* VAX-style insque: link ENTRY immediately after PRED. */
+void insque(
+ register struct queue_entry *entry,
+ register struct queue_entry *pred)
+{
+ entry->next = pred->next;
+ entry->prev = pred;
+ (pred->next)->prev = entry;
+ pred->next = entry;
+}
+
+/* VAX-style remque: unlink ELT from its queue and return it. */
+struct queue_entry
+*remque(
+ register struct queue_entry *elt)
+{
+ (elt->next)->prev = elt->prev;
+ (elt->prev)->next = elt->next;
+ return(elt);
+}
+
diff --git a/pcnet32/queue.h b/pcnet32/queue.h
new file mode 100644
index 000000000..0637dede0
--- /dev/null
+++ b/pcnet32/queue.h
@@ -0,0 +1,370 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon rights
+ * to redistribute these changes.
+ */
+/*
+ * File: queue.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * Type definitions for generic queues.
+ *
+ */
+
+#ifndef _KERN_QUEUE_H_
+#define _KERN_QUEUE_H_
+
+#include <cthreads.h>
+
+/*
+ * Queue of abstract objects. Queue is maintained
+ * within that object.
+ *
+ * Supports fast removal from within the queue.
+ *
+ * How to declare a queue of elements of type "foo_t":
+ * In the "*foo_t" type, you must have a field of
+ * type "queue_chain_t" to hold together this queue.
+ * There may be more than one chain through a
+ * "foo_t", for use by different queues.
+ *
+ * Declare the queue as a "queue_t" type.
+ *
+ * Elements of the queue (of type "foo_t", that is)
+ * are referred to by reference, and cast to type
+ * "queue_entry_t" within this module.
+ */
+
+/*
+ * A generic doubly-linked list (queue).
+ */
+
+/* A link node. Embedded inside client structures (as queue_chain_t) and
+ also used stand-alone as the list header (queue_head_t); a queue is
+ circular, so an empty header points to itself. */
+struct queue_entry {
+ struct queue_entry *next; /* next element */
+ struct queue_entry *prev; /* previous element */
+};
+
+typedef struct queue_entry *queue_t;
+typedef struct queue_entry queue_head_t;
+typedef struct queue_entry queue_chain_t;
+typedef struct queue_entry *queue_entry_t;
+
+/*
+ * enqueue puts "elt" on the "queue".
+ * dequeue returns the first element in the "queue".
+ * remqueue removes the specified "elt" from the specified "queue".
+ */
+
+#define enqueue(queue,elt) enqueue_tail(queue, elt)
+#define dequeue(queue) dequeue_head(queue)
+
+void enqueue_head(queue_t, queue_entry_t);
+void enqueue_tail(queue_t, queue_entry_t);
+queue_entry_t dequeue_head(queue_t);
+queue_entry_t dequeue_tail(queue_t);
+void remqueue(queue_t, queue_entry_t);
+void insque(queue_entry_t, queue_entry_t);
+
+/*
+ * Macro: queue_init
+ * Function:
+ * Initialize the given queue.
+ * Header:
+ * void queue_init(q)
+ * queue_t q; *MODIFIED*
+ */
+#define queue_init(q) ((q)->next = (q)->prev = q)
+
+/*
+ * Macro: queue_first
+ * Function:
+ * Returns the first entry in the queue,
+ * Header:
+ * queue_entry_t queue_first(q)
+ * queue_t q; *IN*
+ */
+#define queue_first(q) ((q)->next)
+
+/*
+ * Macro: queue_next
+ * Function:
+ * Returns the entry after an item in the queue.
+ * Header:
+ * queue_entry_t queue_next(qc)
+ * queue_t qc;
+ */
+#define queue_next(qc) ((qc)->next)
+
+/*
+ * Macro: queue_last
+ * Function:
+ * Returns the last entry in the queue.
+ * Header:
+ * queue_entry_t queue_last(q)
+ * queue_t q; *IN*
+ */
+#define queue_last(q) ((q)->prev)
+
+/*
+ * Macro: queue_prev
+ * Function:
+ * Returns the entry before an item in the queue.
+ * Header:
+ * queue_entry_t queue_prev(qc)
+ * queue_t qc;
+ */
+#define queue_prev(qc) ((qc)->prev)
+
+/*
+ * Macro: queue_end
+ * Function:
+ * Tests whether a new entry is really the end of
+ * the queue.
+ * Header:
+ * boolean_t queue_end(q, qe)
+ * queue_t q;
+ * queue_entry_t qe;
+ */
+#define queue_end(q, qe) ((q) == (qe))
+
+/*
+ * Macro: queue_empty
+ * Function:
+ * Tests whether a queue is empty.
+ * Header:
+ * boolean_t queue_empty(q)
+ * queue_t q;
+ */
+#define queue_empty(q) queue_end((q), queue_first(q))
+
+
+/*----------------------------------------------------------------*/
+/*
+ * Macros that operate on generic structures. The queue
+ * chain may be at any location within the structure, and there
+ * may be more than one chain.
+ */
+
+/*
+ * Macro: queue_enter
+ * Function:
+ * Insert a new element at the tail of the queue.
+ * Header:
+ * void queue_enter(q, elt, type, field)
+ * queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ */
+#define queue_enter(head, elt, type, field) \
+{ \
+ register queue_entry_t prev; \
+ \
+ prev = (head)->prev; \
+ if ((head) == prev) { \
+ (head)->next = (queue_entry_t) (elt); \
+ } \
+ else { \
+ ((type)prev)->field.next = (queue_entry_t)(elt);\
+ } \
+ (elt)->field.prev = prev; \
+ (elt)->field.next = head; \
+ (head)->prev = (queue_entry_t) elt; \
+}
+
+/*
+ * Macro: queue_enter_first
+ * Function:
+ * Insert a new element at the head of the queue.
+ * Header:
+ * void queue_enter_first(q, elt, type, field)
+ * queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ */
+#define queue_enter_first(head, elt, type, field) \
+{ \
+ register queue_entry_t next; \
+ \
+ next = (head)->next; \
+ if ((head) == next) { \
+ (head)->prev = (queue_entry_t) (elt); \
+ } \
+ else { \
+ ((type)next)->field.prev = (queue_entry_t)(elt);\
+ } \
+ (elt)->field.next = next; \
+ (elt)->field.prev = head; \
+ (head)->next = (queue_entry_t) elt; \
+}
+
+/*
+ * Macro: queue_field [internal use only]
+ * Function:
+ * Find the queue_chain_t (or queue_t) for the
+ * given element (thing) in the given queue (head)
+ */
+#define queue_field(head, thing, type, field) \
+ (((head) == (thing)) ? (head) : &((type)(thing))->field)
+
+/*
+ * Macro: queue_remove
+ * Function:
+ * Remove an arbitrary item from the queue.
+ * Header:
+ * void queue_remove(q, qe, type, field)
+ * arguments as in queue_enter
+ */
+#define queue_remove(head, elt, type, field) \
+{ \
+ register queue_entry_t next, prev; \
+ \
+ next = (elt)->field.next; \
+ prev = (elt)->field.prev; \
+ \
+ if ((head) == next) \
+ (head)->prev = prev; \
+ else \
+ ((type)next)->field.prev = prev; \
+ \
+ if ((head) == prev) \
+ (head)->next = next; \
+ else \
+ ((type)prev)->field.next = next; \
+}
+
+/*
+ * Macro: queue_remove_first
+ * Function:
+ * Remove and return the entry at the head of
+ * the queue.
+ * Header:
+ * queue_remove_first(head, entry, type, field)
+ * entry is returned by reference
+ */
+#define queue_remove_first(head, entry, type, field) \
+{ \
+ register queue_entry_t next; \
+ \
+ (entry) = (type) ((head)->next); \
+ next = (entry)->field.next; \
+ \
+ if ((head) == next) \
+ (head)->prev = (head); \
+ else \
+ ((type)(next))->field.prev = (head); \
+ (head)->next = next; \
+}
+
+/*
+ * Macro: queue_remove_last
+ * Function:
+ * Remove and return the entry at the tail of
+ * the queue.
+ * Header:
+ * queue_remove_last(head, entry, type, field)
+ * entry is returned by reference
+ */
+#define queue_remove_last(head, entry, type, field) \
+{ \
+ register queue_entry_t prev; \
+ \
+ (entry) = (type) ((head)->prev); \
+ prev = (entry)->field.prev; \
+ \
+ if ((head) == prev) \
+ (head)->next = (head); \
+ else \
+ ((type)(prev))->field.next = (head); \
+ (head)->prev = prev; \
+}
+
+/*
+ * Macro: queue_assign
+ */
+#define queue_assign(to, from, type, field) \
+{ \
+ ((type)((from)->prev))->field.next = (to); \
+ ((type)((from)->next))->field.prev = (to); \
+ *to = *from; \
+}
+
+/*
+ * Macro: queue_iterate
+ * Function:
+ * iterate over each item in the queue.
+ * Generates a 'for' loop, setting elt to
+ * each item in turn (by reference).
+ * Header:
+ * queue_iterate(q, elt, type, field)
+ * queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ */
+#define queue_iterate(head, elt, type, field) \
+ for ((elt) = (type) queue_first(head); \
+ !queue_end((head), (queue_entry_t)(elt)); \
+ (elt) = (type) queue_next(&(elt)->field))
+
+
+
+/*----------------------------------------------------------------*/
+/*
+ * Define macros for queues with locks.
+ */
+struct mpqueue_head {
+ struct queue_entry head; /* header for queue */
+ struct mutex lock; /* lock for queue */
+};
+
+typedef struct mpqueue_head mpqueue_head_t;
+
+#define round_mpq(size) (size)
+
+#define mpqueue_init(q) \
+ { \
+ queue_init(&(q)->head); \
+ mutex_init(&(q)->lock); \
+ }
+
+#define mpenqueue_tail(q, elt) \
+ mutex_lock(&(q)->lock); \
+ enqueue_tail(&(q)->head, elt); \
+ mutex_unlock(&(q)->lock);
+
+#define mpdequeue_head(q, elt) \
+ mutex_lock(&(q)->lock); \
+ if (queue_empty(&(q)->head)) \
+ *(elt) = 0; \
+ else \
+ *(elt) = dequeue_head(&(q)->head); \
+ mutex_unlock(&(q)->lock);
+
+/*
+ * Old queue stuff, will go away soon.
+ */
+
+#endif /* _KERN_QUEUE_H_ */
diff --git a/pcnet32/skbuff.h b/pcnet32/skbuff.h
new file mode 100644
index 000000000..5f63b44d1
--- /dev/null
+++ b/pcnet32/skbuff.h
@@ -0,0 +1,482 @@
+/*
+ * Definitions for the 'struct sk_buff' memory handlers.
+ *
+ * Authors:
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+
+#include <mach.h>
+#include <cthreads.h>
+
+#include "util.h"
+#include "atomic.h"
+#include "linux-types.h"
+
+#define CONFIG_SKB_CHECK 0
+
+#define HAVE_ALLOC_SKB /* For the drivers to know */
+#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
+
+
+#define FREE_READ 1
+#define FREE_WRITE 0
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_HW 1
+#define CHECKSUM_UNNECESSARY 2
+
+/* Header for a doubly-linked list of sk_buffs. Its layout mirrors the
+ start of struct sk_buff so it can be cast to struct sk_buff * and act
+ as the sentinel node of its own list (see skb_peek below). */
+struct sk_buff_head
+{
+ struct sk_buff * next;
+ struct sk_buff * prev;
+ __u32 qlen; /* Must be same length as a pointer
+ for using debugging */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+};
+
+
+/* Socket buffer: one network packet plus its list linkage and protocol
+ bookkeeping, ported from the Linux 2.0 networking stack. Data lives in
+ the [head,end) region; the valid payload is [data,tail) of length len. */
+struct sk_buff
+{
+ struct sk_buff * next; /* Next buffer in list */
+ struct sk_buff * prev; /* Previous buffer in list */
+ struct sk_buff_head * list; /* List we are on */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+ struct sk_buff *link3; /* Link for IP protocol level buffer chains */
+ struct sock *sk; /* Socket we are owned by */
+ unsigned long when; /* used to compute rtt's */
+ struct timeval stamp; /* Time we arrived */
+ struct linux_device *dev; /* Device we arrived on/are leaving by */
+ union
+ {
+ struct tcphdr *th;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ unsigned char *raw;
+ /* for passing file handles in a unix domain socket */
+ void *filp;
+ } h;
+
+ union
+ {
+ /* As yet incomplete physical layer views */
+ unsigned char *raw;
+ struct ethhdr *ethernet;
+ } mac;
+
+ struct iphdr *ip_hdr; /* For IPPROTO_RAW */
+ unsigned long len; /* Length of actual data */
+ unsigned long csum; /* Checksum */
+ __u32 saddr; /* IP source address */
+ __u32 daddr; /* IP target address */
+ __u32 raddr; /* IP next hop address */
+ __u32 seq; /* TCP sequence number */
+ __u32 end_seq; /* seq [+ fin] [+ syn] + datalen */
+ __u32 ack_seq; /* TCP ack sequence number */
+ unsigned char proto_priv[16]; /* Protocol private data */
+ volatile char acked, /* Are we acked ? */
+ used, /* Are we in use ? */
+ free, /* How to free this buffer */
+ arp; /* Has IP/ARP resolution finished */
+ unsigned char tries, /* Times tried */
+ lock, /* Are we locked ? */
+ localroute, /* Local routing asserted for this frame */
+ pkt_type, /* Packet class */
+ pkt_bridged, /* Tracker for bridging */
+ ip_summed; /* Driver fed us an IP checksum */
+#define PACKET_HOST 0 /* To us */
+#define PACKET_BROADCAST 1 /* To all */
+#define PACKET_MULTICAST 2 /* To group */
+#define PACKET_OTHERHOST 3 /* To someone else */
+ unsigned short users; /* User count - see datagram.c,tcp.c */
+ unsigned short protocol; /* Packet protocol from driver. */
+ unsigned int truesize; /* Buffer size */
+
+ atomic_t count; /* reference count */
+ struct sk_buff *data_skb; /* Link to the actual data skb */
+ unsigned char *head; /* Head of buffer */
+ unsigned char *data; /* Data head pointer */
+ unsigned char *tail; /* Tail pointer */
+ unsigned char *end; /* End pointer */
+ void (*destructor)(struct sk_buff *); /* Destruct function */
+ __u16 redirport; /* Redirect port */
+#ifdef MACH
+//#ifdef MACH_INCLUDE
+ /* Mach additions: reply port/type and out-of-line data address --
+ presumably used to complete the device RPC once the packet is
+ handled; TODO confirm against ds_routines.c. */
+ mach_port_t reply;
+ mach_msg_type_name_t reply_type;
+ vm_address_t copy;
+//#else
+// void *reply;
+// unsigned reply_type;
+// void *copy;
+//#endif
+#endif
+};
+
+#ifdef CONFIG_SKB_LARGE
+#define SK_WMEM_MAX 65535
+#define SK_RMEM_MAX 65535
+#else
+#define SK_WMEM_MAX 32767
+#define SK_RMEM_MAX 32767
+#endif
+
+#if CONFIG_SKB_CHECK
+#define SK_FREED_SKB 0x0DE2C0DE
+#define SK_GOOD_SKB 0xDEC0DED1
+#define SK_HEAD_SKB 0x12231298
+#endif
+
+/*
+ * Handling routines are only of interest to the kernel
+ */
+
+#if 0
+extern void print_skb(struct sk_buff *);
+#endif
+extern void kfree_skb(struct sk_buff *skb, int rw);
+extern void skb_queue_head_init(struct sk_buff_head *list);
+extern void skb_queue_head(struct sk_buff_head *list,struct sk_buff *buf);
+extern void skb_queue_tail(struct sk_buff_head *list,struct sk_buff *buf);
+extern struct sk_buff * skb_dequeue(struct sk_buff_head *list);
+extern void skb_insert(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_append(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_unlink(struct sk_buff *buf);
+extern __u32 skb_queue_len(struct sk_buff_head *list);
+extern struct sk_buff * skb_peek_copy(struct sk_buff_head *list);
+extern struct sk_buff * alloc_skb(unsigned int size, int priority);
+extern struct sk_buff * dev_alloc_skb(unsigned int size);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff * skb_copy(struct sk_buff *skb, int priority);
+extern void skb_device_lock(struct sk_buff *skb);
+extern void skb_device_unlock(struct sk_buff *skb);
+extern void dev_kfree_skb(struct sk_buff *skb, int mode);
+extern int skb_device_locked(struct sk_buff *skb);
+extern unsigned char * skb_put(struct sk_buff *skb, int len);
+extern unsigned char * skb_push(struct sk_buff *skb, int len);
+extern unsigned char * skb_pull(struct sk_buff *skb, int len);
+extern int skb_headroom(struct sk_buff *skb);
+extern int skb_tailroom(struct sk_buff *skb);
+extern void skb_reserve(struct sk_buff *skb, int len);
+extern void skb_trim(struct sk_buff *skb, int len);
+
+/* True when LIST holds no buffers (the header points back to itself). */
+extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
+{
+ return (list->next == (struct sk_buff *) list);
+}
+
+/*
+ * Peek an sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. For an interrupt
+ * type system cli() peek the buffer copy the data and sti();
+ */
+/* Return the first buffer on the list without removing it, or NULL if
+ the list is empty. The buffer stays on the list -- see the warning
+ above. */
+extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
+{
+ struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
+
+/*
+ * Return the length of an sk_buff queue
+ */
+
+/* Number of buffers currently on the list; O(1), maintained in qlen. */
+extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
+{
+ return(list_->qlen);
+}
+
+#if CONFIG_SKB_CHECK
+extern int skb_check(struct sk_buff *skb,int,int, char *);
+#define IS_SKB(skb) skb_check((skb), 0, __LINE__,__FILE__)
+#define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__,__FILE__)
+#else
+#define IS_SKB(skb)
+#define IS_SKB_HEAD(skb)
+
+extern struct mutex skb_queue_lock;
+
+/* Make LIST an empty queue: both links point at the header itself. */
+extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = (struct sk_buff *)list;
+ list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
+/*
+ * Insert an sk_buff at the start of a list.
+ *
+ * The "__skb_xxxx()" functions are the non-atomic ones that
+ * can only be called with interrupts disabled.
+ */
+
+/* Link NEWSK in immediately after the list header. Caller must hold
+ skb_queue_lock (or otherwise exclude concurrent list users). */
+extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ prev = (struct sk_buff *)list;
+ next = prev->next;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+/* Locked variant of __skb_queue_head: serializes on skb_queue_lock (the
+ mutex replaces the old cli/sti blackout kept in comments). */
+extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+// unsigned long flags;
+
+// save_flags(flags);
+// cli();
+ mutex_lock (&skb_queue_lock);
+ __skb_queue_head(list, newsk);
+ mutex_unlock (&skb_queue_lock);
+// restore_flags(flags);
+}
+
+/*
+ * Insert an sk_buff at the end of a list.
+ */
+
+/* Link NEWSK in immediately before the list header (end of queue).
+ Caller must hold skb_queue_lock. */
+extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ next = (struct sk_buff *)list;
+ prev = next->prev;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+/* Locked variant of __skb_queue_tail: serializes on skb_queue_lock. */
+extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+// unsigned long flags;
+
+// save_flags(flags);
+// cli();
+ mutex_lock (&skb_queue_lock);
+ __skb_queue_tail(list, newsk);
+ mutex_unlock (&skb_queue_lock);
+// restore_flags(flags);
+}
+
+/*
+ * Remove an sk_buff from a list.
+ */
+
+/* Unlink and return the first buffer on LIST, or NULL if LIST is empty.
+ Caller must hold skb_queue_lock. */
+extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *next, *prev, *result;
+
+ prev = (struct sk_buff *) list;
+ next = prev->next;
+ result = NULL;
+ if (next != prev) {
+ result = next;
+ next = next->next;
+ list->qlen--;
+ next->prev = prev;
+ prev->next = next;
+ /* Fully detach the removed buffer. */
+ result->next = NULL;
+ result->prev = NULL;
+ result->list = NULL;
+ }
+ return result;
+}
+
+/* Locked variant of __skb_dequeue: serializes on skb_queue_lock. */
+extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+{
+// long flags;
+ struct sk_buff *result;
+
+// save_flags(flags);
+// cli();
+ mutex_lock (&skb_queue_lock);
+ result = __skb_dequeue(list);
+ mutex_unlock (&skb_queue_lock);
+// restore_flags(flags);
+ return result;
+}
+
+/*
+ * Insert a packet on a list.
+ */
+
+/* Splice NEWSK between PREV and NEXT on LIST. PREV and NEXT must be
+ adjacent on LIST; caller must hold skb_queue_lock. */
+extern __inline__ void __skb_insert(struct sk_buff *newsk,
+ struct sk_buff * prev, struct sk_buff *next,
+ struct sk_buff_head * list)
+{
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+ newsk->list = list;
+ list->qlen++;
+}
+
+/*
+ * Place a packet before a given packet in a list
+ */
+/* Insert NEWSK immediately before OLD on OLD's list (locked). */
+extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+// unsigned long flags;
+
+// save_flags(flags);
+// cli();
+ mutex_lock (&skb_queue_lock);
+ __skb_insert(newsk, old->prev, old, old->list);
+ mutex_unlock (&skb_queue_lock);
+// restore_flags(flags);
+}
+
+/*
+ * Place a packet after a given packet in a list.
+ */
+
+/* Insert NEWSK immediately after OLD on OLD's list (locked). */
+extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+// unsigned long flags;
+
+// save_flags(flags);
+// cli();
+ mutex_lock (&skb_queue_lock);
+ __skb_insert(newsk, old, old->next, old->list);
+ mutex_unlock (&skb_queue_lock);
+// restore_flags(flags);
+}
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known..
+ */
+/* Detach SKB from LIST. SKB must actually be on LIST; caller must hold
+ skb_queue_lock. */
+extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ struct sk_buff * next, * prev;
+
+ list->qlen--;
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->list = NULL;
+ next->prev = prev;
+ prev->next = next;
+}
+
+/*
+ * Remove an sk_buff from its list. Works even without knowing the list it
+ * is sitting on, which can be handy at times. It also means that THE LIST
+ * MUST EXIST when you unlink. Thus a list must have its contents unlinked
+ * _FIRST_.
+ */
+
+/* Detach SKB from whatever list it is on, if any; a no-op when
+ skb->list is NULL (locked). */
+extern __inline__ void skb_unlink(struct sk_buff *skb)
+{
+// unsigned long flags;
+
+// save_flags(flags);
+// cli();
+ mutex_lock (&skb_queue_lock);
+ if(skb->list)
+ __skb_unlink(skb, skb->list);
+ mutex_unlock (&skb_queue_lock);
+// restore_flags(flags);
+}
+
+/*
+ * Add data to an sk_buff
+ */
+/* Extend the data area by LEN bytes at the tail and return the old tail
+ pointer (where the caller should write). Panics if the tail would run
+ past the end of the buffer. */
+extern __inline__ unsigned char *skb_put(struct sk_buff *skb, int len)
+{
+ unsigned char *tmp=skb->tail;
+ skb->tail+=len;
+ skb->len+=len;
+ if(skb->tail>skb->end)
+ {
+ __label__ here;
+ panic("skput:over: %p:%d", &&here,len);
+here:
+ ;
+ }
+ return tmp;
+}
+
+/* Prepend LEN bytes to the front of the data area and return the new
+ data pointer. Panics if the data pointer would run below the start of
+ the buffer. */
+extern __inline__ unsigned char *skb_push(struct sk_buff *skb, int len)
+{
+ skb->data-=len;
+ skb->len+=len;
+ if(skb->data<skb->head)
+ {
+ __label__ here;
+ panic("skpush:under: %p:%d", &&here,len);
+here:
+ ;
+ }
+ return skb->data;
+}
+
+/* Strip LEN bytes from the front of the data area and return the new
+ data pointer, or NULL if the buffer holds fewer than LEN bytes. */
+extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, int len)
+{
+ if(len > skb->len)
+ return NULL;
+ skb->data+=len;
+ skb->len-=len;
+ return skb->data;
+}
+
+/* Bytes of free space before the data area. */
+extern __inline__ int skb_headroom(struct sk_buff *skb)
+{
+ return skb->data-skb->head;
+}
+
+/* Bytes of free space after the tail of the data area. */
+extern __inline__ int skb_tailroom(struct sk_buff *skb)
+{
+ return skb->end-skb->tail;
+}
+
+/* Shift the data area forward by LEN bytes to reserve headroom; does not
+ adjust len, so it is intended for use before any data is added. */
+extern __inline__ void skb_reserve(struct sk_buff *skb, int len)
+{
+ skb->data+=len;
+ skb->tail+=len;
+}
+
+/* Truncate the buffer's data to LEN bytes; a no-op if it is already
+ LEN bytes or shorter. */
+extern __inline__ void skb_trim(struct sk_buff *skb, int len)
+{
+ if(skb->len>len)
+ {
+ skb->len=len;
+ skb->tail=skb->data+len;
+ }
+}
+
+#endif
+
+extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
+//extern int datagram_select(struct sock *sk, int sel_type, select_table *wait);
+extern void skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size);
+//extern void skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to,int size);
+extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
+
+#endif /* _LINUX_SKBUFF_H */
diff --git a/pcnet32/spl.h b/pcnet32/spl.h
new file mode 100644
index 000000000..c414ed87b
--- /dev/null
+++ b/pcnet32/spl.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 1995, 1994, 1993, 1992, 1991, 1990
+ * Open Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of ("OSF") or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL OSF BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE
+ */
+/*
+ * OSF Research Institute MK6.1 (unencumbered) 1/31/1995
+ */
+
+#ifndef _MACHINE_SPL_H_
+#define _MACHINE_SPL_H_
+
+/*
+ * This file defines the interrupt priority levels used by
+ * machine-dependent code.
+ */
+
+typedef int spl_t;	/* interrupt priority level; kept only for source compatibility with Mach code */
+
+#define splx(x) ((x) = (x))	/* self-assignment "uses" the saved level, silencing unused-variable warnings */
+#define spltty() (0)	/* user-level driver: the kernel does the real interrupt masking, */
+#define splsched() (0)	/* so every spl routine degenerates to a no-op reporting level 0 */
+#define SPLKD() (0)
+
+//extern spl_t (splhi)(void);
+//
+//extern spl_t (spl0)(void);
+//
+//extern spl_t (spl1)(void);
+//
+//extern spl_t (spl2)(void);
+//
+//extern spl_t (spl3)(void);
+//
+//extern spl_t (spl4)(void);
+//extern spl_t (splhdw)(void);
+//
+//extern spl_t (spl5)(void);
+//extern spl_t (spldcm)(void);
+//
+//extern spl_t (spl6)(void);
+//
+//extern spl_t (splsched)(void);
+//
+//extern spl_t (splx)(spl_t n);
+//
+//extern spl_t (splsoftclock)(void);
+//
+//extern void splon (unsigned long n);
+//
+//extern unsigned long sploff (void);
+//
+//extern spl_t splhigh (void);
+//
+//extern spl_t splimp (void);
+//
+//extern spl_t spltty (void);
+//
+//extern spl_t splclock (void);
+//
+//extern void setsoftclock (void);
+
+#endif /* _MACHINE_SPL_H_ */
diff --git a/pcnet32/util.h b/pcnet32/util.h
new file mode 100644
index 000000000..6fb1db284
--- /dev/null
+++ b/pcnet32/util.h
@@ -0,0 +1,33 @@
+#ifndef __UTIL_H__
+#define __UTIL_H__
+
+#include <stdio.h>
+
+#define panic(format, ...) do /* print a fatal message on stderr and abort() */ \
+{ \
+  char buf[1024]; \
+  snprintf (buf, 1024, "pcnet32: %s", format); /* was "devnode: ": stale copy-paste; use the same prefix as debug() */ \
+  fprintf (stderr , buf, ## __VA_ARGS__); \
+  fflush (stderr); \
+  abort (); \
+} while (0)
+
+#define DEBUG	/* define to enable debug(); remove to compile all debug output away */
+
+#ifdef DEBUG
+
+#define debug(format, ...) do \
+{ \
+  char buf[1024]; \
+  snprintf (buf, 1024, "pcnet32: %s: %s\n", __func__, format); /* prefix each message with subsystem and calling function */ \
+  fprintf (stderr , buf, ## __VA_ARGS__); \
+  fflush (stderr); /* flush immediately so messages survive a crash */ \
+} while (0)
+
+#else
+
+#define debug(format, ...) do {} while (0)
+
+#endif
+
+#endif
diff --git a/pcnet32/vm_param.h b/pcnet32/vm_param.h
new file mode 100644
index 000000000..7b615c8a0
--- /dev/null
+++ b/pcnet32/vm_param.h
@@ -0,0 +1,7 @@
+#ifndef __VM_PARAM_H__
+#define __VM_PARAM_H__
+
+#define PAGE_SIZE __vm_page_size	/* Mach's run-time page size variable */
+#define PAGE_MASK (PAGE_SIZE-1)	/* NOTE(review): offset-within-page mask; Linux's PAGE_MASK is ~(PAGE_SIZE-1) — verify callers expect this polarity */
+
+#endif