summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSamuel Thibault <samuel.thibault@ens-lyon.org>2013-02-04 10:27:44 +0100
committerSamuel Thibault <samuel.thibault@ens-lyon.org>2013-02-04 10:27:44 +0100
commitba1b3afd50913473f3036a63b4a82d7ba5c42009 (patch)
tree9dff0ddec4bf8b927a025b4bf9882cb1731170f3
parentbfdb3be16e5a20eebc97b3ca613d9a4da4465533 (diff)
parent51e87d005139a435cd846ac5c224eed5042c4fa0 (diff)
Merge branch 'master' into master-gdb_stubs
-rw-r--r--.gitignore39
-rw-r--r--Makefile.am37
-rw-r--r--Makefrag.am33
-rw-r--r--Makerules.am10
-rw-r--r--Makerules.mig.am22
-rw-r--r--configfrag.ac6
-rw-r--r--configure.ac6
-rw-r--r--ddb/db_examine.c2
-rw-r--r--ddb/db_macro.c2
-rw-r--r--ddb/db_macro.h2
-rw-r--r--ddb/db_output.c4
-rw-r--r--ddb/db_output.h6
-rw-r--r--ddb/db_sym.c2
-rw-r--r--ddb/db_task_thread.c4
-rw-r--r--ddb/db_variables.h4
-rw-r--r--device/cons.c109
-rw-r--r--device/cons.h13
-rw-r--r--device/dev_lookup.c17
-rw-r--r--device/dev_pager.c39
-rw-r--r--device/device.srv2
-rw-r--r--device/ds_routines.c52
-rw-r--r--device/ds_routines.h4
-rw-r--r--device/errno.h45
-rw-r--r--device/io_req.h5
-rw-r--r--device/kmsg.h2
-rw-r--r--device/net_io.c41
-rw-r--r--doc/.gitignore4
-rw-r--r--i386/Makefrag.am10
-rw-r--r--i386/configfrag.ac4
-rw-r--r--i386/i386/.gitignore1
-rw-r--r--i386/i386/db_interface.c13
-rw-r--r--i386/i386/db_interface.h1
-rw-r--r--i386/i386/db_machdep.h2
-rw-r--r--i386/i386/db_trace.c129
-rw-r--r--i386/i386/debug_i386.c14
-rw-r--r--i386/i386/fpu.c50
-rw-r--r--i386/i386/fpu.h1
-rw-r--r--i386/i386/gdt.c22
-rw-r--r--i386/i386/gdt.h8
-rw-r--r--i386/i386/hardclock.c8
-rw-r--r--i386/i386/i386asm.sym11
-rw-r--r--i386/i386/idt.c6
-rw-r--r--i386/i386/idt_inittab.S12
-rw-r--r--i386/i386/io_map.c2
-rw-r--r--i386/i386/io_perm.c7
-rw-r--r--i386/i386/ipl.h1
-rw-r--r--i386/i386/ktss.c10
-rw-r--r--i386/i386/ldt.c18
-rw-r--r--i386/i386/locore.S137
-rw-r--r--i386/i386/locore.h34
-rw-r--r--i386/i386/machine_task.c20
-rw-r--r--i386/i386/mp_desc.c6
-rw-r--r--i386/i386/pcb.c56
-rw-r--r--i386/i386/pcb.h2
-rw-r--r--i386/i386/phys.c6
-rw-r--r--i386/i386/pic.h4
-rw-r--r--i386/i386/proc_reg.h48
-rw-r--r--i386/i386/seg.h18
-rw-r--r--i386/i386/spl.S4
-rw-r--r--i386/i386/task.h4
-rw-r--r--i386/i386/thread.h78
-rw-r--r--i386/i386/trap.c40
-rw-r--r--i386/i386/user_ldt.c22
-rw-r--r--i386/i386/user_ldt.h4
-rw-r--r--i386/i386/vm_param.h34
-rw-r--r--i386/i386/xen.h11
-rw-r--r--i386/i386/zalloc.h29
-rw-r--r--i386/i386at/autoconf.c11
-rw-r--r--i386/i386at/boothdr.S37
-rw-r--r--i386/i386at/com.c27
-rw-r--r--i386/i386at/com.h6
-rw-r--r--i386/i386at/cons_conf.c6
-rw-r--r--i386/i386at/kd.c23
-rw-r--r--i386/i386at/kd.h7
-rw-r--r--i386/i386at/kd_event.c194
-rw-r--r--i386/i386at/kd_mouse.c164
-rw-r--r--i386/i386at/lpr.c122
-rw-r--r--i386/i386at/model_dep.c84
-rw-r--r--i386/i386at/pic_isa.c6
-rw-r--r--i386/i386at/rtc.c25
-rw-r--r--i386/include/mach/i386/exec/elf.h8
-rw-r--r--i386/include/mach/i386/vm_param.h8
-rw-r--r--i386/include/mach/i386/vm_types.h6
-rw-r--r--i386/include/mach/sa/stdarg.h2
-rw-r--r--i386/intel/pmap.c262
-rw-r--r--i386/intel/pmap.h24
-rw-r--r--i386/ldscript4
-rw-r--r--i386/xen/Makefrag.am3
-rw-r--r--i386/xen/xen.c4
-rw-r--r--i386/xen/xen_boothdr.S30
-rw-r--r--i386/xen/xen_locore.S2
-rw-r--r--include/device/device.defs28
-rw-r--r--include/mach/gnumach.defs39
-rw-r--r--include/mach/mach.defs84
-rw-r--r--include/mach/mach_host.defs39
-rw-r--r--include/mach/mach_types.defs14
-rw-r--r--include/mach/mach_types.h1
-rw-r--r--include/mach/port.h2
-rw-r--r--include/mach/syscall_sw.h1
-rw-r--r--include/mach/vm_cache_statistics.h41
-rw-r--r--include/mach/xen.h8
-rw-r--r--include/mach_debug/mach_debug.defs20
-rw-r--r--include/mach_debug/mach_debug_types.defs7
-rw-r--r--include/mach_debug/mach_debug_types.h2
-rw-r--r--include/mach_debug/slab_info.h (renamed from include/mach_debug/zone_info.h)57
-rw-r--r--include/string.h2
-rw-r--r--include/sys/types.h2
-rw-r--r--ipc/ipc_entry.c4
-rw-r--r--ipc/ipc_entry.h8
-rw-r--r--ipc/ipc_hash.c10
-rw-r--r--ipc/ipc_hash.h2
-rw-r--r--ipc/ipc_init.c47
-rw-r--r--ipc/ipc_init.h8
-rw-r--r--ipc/ipc_kmsg.c8
-rw-r--r--ipc/ipc_marequest.c27
-rw-r--r--ipc/ipc_marequest.h1
-rw-r--r--ipc/ipc_mqueue.c4
-rw-r--r--ipc/ipc_notify.c6
-rw-r--r--ipc/ipc_object.c8
-rw-r--r--ipc/ipc_object.h10
-rw-r--r--ipc/ipc_port.c3
-rw-r--r--ipc/ipc_pset.c4
-rw-r--r--ipc/ipc_space.c4
-rw-r--r--ipc/ipc_space.h7
-rw-r--r--ipc/ipc_table.c14
-rw-r--r--ipc/ipc_thread.h6
-rw-r--r--ipc/mach_msg.c4
-rw-r--r--ipc/mach_port.c4
-rw-r--r--kern/act.c18
-rw-r--r--kern/ast.h2
-rw-r--r--kern/boot_script.c29
-rw-r--r--kern/boot_script.h2
-rw-r--r--kern/bootstrap.c24
-rw-r--r--kern/compat_xxx_defs.h64
-rw-r--r--kern/debug.c6
-rw-r--r--kern/debug.h2
-rw-r--r--kern/gnumach.srv23
-rw-r--r--kern/ipc_host.c18
-rw-r--r--kern/ipc_kobject.c17
-rw-r--r--kern/ipc_tt.c1
-rw-r--r--kern/kalloc.c254
-rw-r--r--kern/kalloc.h4
-rw-r--r--kern/list.h359
-rw-r--r--kern/lock.c10
-rw-r--r--kern/mach.srv2
-rw-r--r--kern/mach_clock.c7
-rw-r--r--kern/mach_clock.h8
-rw-r--r--kern/mach_factor.c2
-rw-r--r--kern/mach_host.srv2
-rw-r--r--kern/mach_param.h67
-rw-r--r--kern/machine.c54
-rw-r--r--kern/macro_help.h4
-rw-r--r--kern/pc_sample.c3
-rw-r--r--kern/printf.c31
-rw-r--r--kern/printf.h3
-rw-r--r--kern/priority.c1
-rw-r--r--kern/processor.c15
-rw-r--r--kern/rbtree.c483
-rw-r--r--kern/rbtree.h310
-rw-r--r--kern/rbtree_i.h186
-rw-r--r--kern/sched_prim.c7
-rw-r--r--kern/server_loop.ch1
-rw-r--r--kern/slab.c1615
-rw-r--r--kern/slab.h254
-rw-r--r--kern/startup.c5
-rw-r--r--kern/strings.c19
-rw-r--r--kern/syscall_emulation.c65
-rw-r--r--kern/syscall_subr.c16
-rw-r--r--kern/syscall_subr.h1
-rw-r--r--kern/syscall_sw.c6
-rw-r--r--kern/task.c79
-rw-r--r--kern/task.h11
-rw-r--r--kern/thread.c30
-rw-r--r--kern/zalloc.c1007
-rw-r--r--kern/zalloc.h136
-rw-r--r--linux/Makefrag.am11
-rw-r--r--linux/configfrag.ac33
-rw-r--r--linux/dev/arch/i386/kernel/irq.c25
-rw-r--r--linux/dev/drivers/block/floppy.c13
-rw-r--r--linux/dev/drivers/block/genhd.c7
-rw-r--r--linux/dev/glue/block.c88
-rw-r--r--linux/dev/glue/glue.h44
-rw-r--r--linux/dev/glue/kmem.c18
-rw-r--r--linux/dev/glue/misc.c37
-rw-r--r--linux/dev/glue/net.c15
-rw-r--r--linux/dev/include/asm-i386/errno.h266
-rw-r--r--linux/dev/include/asm-i386/page.h7
-rw-r--r--linux/dev/include/asm-i386/system.h4
-rw-r--r--linux/dev/include/linux/blk.h2
-rw-r--r--linux/dev/include/linux/kernel.h4
-rw-r--r--linux/dev/include/linux/notifier.h4
-rw-r--r--linux/dev/init/main.c31
-rw-r--r--linux/dev/kernel/dma.c4
-rw-r--r--linux/dev/kernel/printk.c4
-rw-r--r--linux/dev/kernel/resource.c2
-rw-r--r--linux/dev/kernel/sched.c27
-rw-r--r--linux/dev/kernel/softirq.c12
-rw-r--r--linux/dev/lib/vsprintf.c4
-rw-r--r--linux/pcmcia-cs/glue/wireless_glue.h9
-rw-r--r--linux/src/arch/i386/kernel/bios32.c24
-rw-r--r--linux/src/drivers/block/floppy.c4
-rw-r--r--linux/src/drivers/block/ide-cd.c64
-rw-r--r--linux/src/drivers/block/ide.c104
-rw-r--r--linux/src/drivers/block/ide.h8
-rw-r--r--linux/src/drivers/block/triton.c820
-rw-r--r--linux/src/drivers/net/3c503.c2
-rw-r--r--linux/src/drivers/net/3c505.c4
-rw-r--r--linux/src/drivers/net/8390.c5
-rw-r--r--linux/src/drivers/net/at1700.c2
-rw-r--r--linux/src/drivers/net/de4x5.c20
-rw-r--r--linux/src/drivers/net/depca.c8
-rw-r--r--linux/src/drivers/net/pci-scan.c2
-rw-r--r--linux/src/drivers/scsi/FlashPoint.c15
-rw-r--r--linux/src/drivers/scsi/advansys.c28
-rw-r--r--linux/src/drivers/scsi/aha152x.c2
-rw-r--r--linux/src/drivers/scsi/aha1542.c6
-rw-r--r--linux/src/drivers/scsi/aic7xxx.c2
-rw-r--r--linux/src/drivers/scsi/eata.c2
-rw-r--r--linux/src/drivers/scsi/gdth.c5
-rw-r--r--linux/src/drivers/scsi/hosts.c9
-rw-r--r--linux/src/drivers/scsi/ncr53c8xx.c2
-rw-r--r--linux/src/drivers/scsi/ncr53c8xx.h2
-rw-r--r--linux/src/drivers/scsi/ppa.c5
-rw-r--r--linux/src/drivers/scsi/sym53c8xx.c14696
-rw-r--r--linux/src/drivers/scsi/sym53c8xx.h116
-rw-r--r--linux/src/drivers/scsi/sym53c8xx_comm.h2717
-rw-r--r--linux/src/drivers/scsi/sym53c8xx_defs.h1767
-rw-r--r--linux/src/drivers/scsi/ultrastor.c12
-rw-r--r--linux/src/drivers/scsi/wd7000.c2
-rw-r--r--linux/src/include/asm-i386/io.h6
-rw-r--r--linux/src/include/asm-i386/posix_types.h4
-rw-r--r--linux/src/include/asm-i386/string-486.h702
-rw-r--r--linux/src/include/asm-i386/types.h2
-rw-r--r--linux/src/include/linux/compatmac.h5
-rw-r--r--linux/src/include/linux/compiler-gcc.h106
-rw-r--r--linux/src/include/linux/compiler-gcc3.h23
-rw-r--r--linux/src/include/linux/compiler-gcc4.h57
-rw-r--r--linux/src/include/linux/compiler.h311
-rw-r--r--linux/src/include/linux/ctype.h70
-rw-r--r--linux/src/include/linux/hdreg.h49
-rw-r--r--linux/src/include/linux/init.h (renamed from linux/pcmcia-cs/include/linux/init.h)10
-rw-r--r--linux/src/include/linux/interrupt.h6
-rw-r--r--linux/src/include/linux/kcomp.h2
-rw-r--r--linux/src/include/linux/module.h3
-rw-r--r--linux/src/include/linux/stddef.h2
-rw-r--r--linux/src/init/main.c3
-rw-r--r--linux/src/kernel/softirq.c10
-rw-r--r--linux/src/lib/ctype.c54
-rw-r--r--tests/.gitignore1
-rw-r--r--vm/memory_object.c28
-rw-r--r--vm/memory_object_proxy.c24
-rw-r--r--vm/memory_object_proxy.h48
-rw-r--r--vm/vm_external.c43
-rw-r--r--vm/vm_fault.c30
-rw-r--r--vm/vm_init.c8
-rw-r--r--vm/vm_kern.c30
-rw-r--r--vm/vm_kern.h4
-rw-r--r--vm/vm_map.c308
-rw-r--r--vm/vm_map.h19
-rw-r--r--vm/vm_object.c123
-rw-r--r--vm/vm_object.h23
-rw-r--r--vm/vm_page.h1
-rw-r--r--vm/vm_pageout.c8
-rw-r--r--vm/vm_resident.c46
-rw-r--r--vm/vm_user.c33
-rw-r--r--vm/vm_user.h2
-rw-r--r--xen/block.c11
-rw-r--r--xen/block.h2
-rw-r--r--xen/configfrag.ac34
-rw-r--r--xen/console.c13
-rw-r--r--xen/console.h9
-rw-r--r--xen/evt.c6
-rw-r--r--xen/evt.h2
-rw-r--r--xen/grant.c2
-rw-r--r--xen/grant.h2
-rw-r--r--xen/net.c97
-rw-r--r--xen/net.h2
-rw-r--r--xen/ring.c2
-rw-r--r--xen/ring.h2
-rw-r--r--xen/store.c5
-rw-r--r--xen/store.h2
-rw-r--r--xen/time.c7
-rw-r--r--xen/time.h2
-rw-r--r--xen/xen.c9
-rw-r--r--xen/xen.h3
285 files changed, 26324 insertions, 5481 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..4488bc2d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,39 @@
+*~
+
+.deps/
+.dirstamp
+*.o
+*.a
+/gnumach
+/gnumach-undef
+/gnumach-undef-bad
+/gnumach.msgids
+
+# Makerules.am
+*.gz
+*.stripped
+
+# Makerules.mig.am
+*.user.*
+*.server.*
+
+autom4te.cache/
+build-aux/
+/INSTALL
+/Makefile
+/Makefile.in
+/aclocal.m4
+/config.h
+/config.h.in
+/config.log
+/config.status
+/config.status.orig
+/configure
+/stamp-h1
+/version.c
+
+# Ignore arch symlinks
+/machine
+/mach/machine
+/linux/src/include/asm
+/linux/dev/include/asm
diff --git a/Makefile.am b/Makefile.am
index 04885ddb..319b7e80 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,6 +1,6 @@
# Makefile for GNU Mach.
-# Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
+# Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
@@ -51,6 +51,9 @@ AM_CPPFLAGS += \
-I$(top_srcdir)/$(systype)/include/mach/sa \
-I$(top_srcdir)/include
+AM_CFLAGS += \
+ -fno-builtin-log
+
# Yes, this makes the eyes hurt. But perhaps someone will finally take care of
# all that scruffy Mach code... Also see <http://savannah.gnu.org/task/?5726>.
AM_CFLAGS += \
@@ -68,6 +71,26 @@ AM_CFLAGS += \
endif
#
+# Silent build support.
+#
+
+AWK_V = $(AWK_V_$(V))
+AWK_V_ = $(AWK_V_$(AM_DEFAULT_VERBOSITY))
+AWK_V_0 = @echo " AWK $@";
+
+GZIP_V = $(GZIP_V_$(V))
+GZIP_V_ = $(GZIP_V_$(AM_DEFAULT_VERBOSITY))
+GZIP_V_0 = @echo " GZIP $@";
+
+NM_V = $(NM_V_$(V))
+NM_V_ = $(NM_V_$(AM_DEFAULT_VERBOSITY))
+NM_V_0 = @echo " NM $@";
+
+MIGCOM_V = $(MIGCOM_V_$(V))
+MIGCOM_V_ = $(MIGCOM_V_$(AM_DEFAULT_VERBOSITY))
+MIGCOM_V_0 = @echo " MIG $@";
+
+#
# MIG Setup.
#
@@ -133,22 +156,22 @@ noinst_PROGRAMS += \
gnumach.o
# This is the list of routines we decide is OK to steal from the C library.
-clib_routines := memcmp memcpy memmove memset bcopy bzero \
- strchr strstr strsep strpbrk strtok \
+clib_routines := memcmp memcpy memmove \
+ strchr strstr strsep strtok \
htonl htons ntohl ntohs \
udivdi3 __udivdi3 \
__rel_iplt_start __rel_iplt_end \
_START _start etext _edata end _end # actually ld magic, not libc.
gnumach-undef: gnumach.$(OBJEXT)
- $(NM) -u $< | sed 's/ *U *//' | sort -u > $@
+ $(NM_V) $(NM) -u $< | sed 's/ *U *//' | sort -u > $@
MOSTLYCLEANFILES += gnumach-undef
gnumach-undef-bad: gnumach-undef Makefile
- sed '$(foreach r,$(clib_routines),/^$r$$/d;)' $< > $@
+ $(AM_V_GEN) sed '$(foreach r,$(clib_routines),/^$r$$/d;)' $< > $@
MOSTLYCLEANFILES += gnumach-undef-bad
clib-routines.o: gnumach-undef gnumach-undef-bad
- if test -s gnumach-undef-bad; \
+ $(AM_V_at) if test -s gnumach-undef-bad; \
then cat gnumach-undef-bad; exit 2; else true; fi
- $(CCLD) -nostdlib -nostartfiles -r -static \
+ $(AM_V_CCLD) $(CCLD) -nostdlib -nostartfiles -r -static \
-o $@ `sed 's/^/-Wl,-u,/' < $<` -x c /dev/null -lc -lgcc
gnumach_LINK = $(LD) $(LINKFLAGS) $(gnumach_LINKFLAGS) -o $@
diff --git a/Makefrag.am b/Makefrag.am
index cc5a01ea..42026f33 100644
--- a/Makefrag.am
+++ b/Makefrag.am
@@ -1,6 +1,7 @@
# Main Makefile fragment for GNU Mach.
-# Copyright (C) 1997, 1999, 2004, 2006, 2007 Free Software Foundation, Inc.
+# Copyright (C) 1997, 1999, 2004, 2006, 2007, 2009 Free Software
+# Foundation, Inc.
# Permission to use, copy, modify and distribute this software and its
# documentation is hereby granted, provided that both the copyright
@@ -61,6 +62,9 @@ libkernel_a_SOURCES += \
ddb/nlist.h \
ddb/stab.h \
ddb/tr.h
+
+# We need frame pointers for trace to work properly.
+AM_CFLAGS += -fno-omit-frame-pointer
endif
#
@@ -135,7 +139,6 @@ libkernel_a_SOURCES += \
kern/ast.h \
kern/boot_script.h \
kern/bootstrap.c \
- kern/compat_xxx_defs.h \
kern/counters.c \
kern/counters.h \
kern/cpu_number.h \
@@ -156,9 +159,9 @@ libkernel_a_SOURCES += \
kern/ipc_sched.h \
kern/ipc_tt.c \
kern/ipc_tt.h \
- kern/kalloc.c \
kern/kalloc.h \
kern/kern_types.h \
+ kern/list.h \
kern/lock.c \
kern/lock.h \
kern/lock_mon.c \
@@ -166,7 +169,6 @@ libkernel_a_SOURCES += \
kern/mach_clock.h \
kern/mach_factor.c \
kern/mach_factor.h \
- kern/mach_param.h \
kern/machine.c \
kern/machine.h \
kern/macro_help.h \
@@ -180,7 +182,12 @@ libkernel_a_SOURCES += \
kern/profile.c \
kern/queue.c \
kern/queue.h \
+ kern/rbtree.c \
+ kern/rbtree.h \
+ kern/rbtree_i.h \
kern/refcount.h \
+ kern/slab.c \
+ kern/slab.h \
kern/sched.h \
kern/sched_prim.c \
kern/sched_prim.h \
@@ -205,13 +212,12 @@ libkernel_a_SOURCES += \
kern/timer.h \
kern/xpr.c \
kern/xpr.h \
- kern/zalloc.c \
- kern/zalloc.h \
kern/elf-load.c \
kern/boot_script.c
EXTRA_DIST += \
kern/mach.srv \
kern/mach4.srv \
+ kern/gnumach.srv \
kern/mach_debug.srv \
kern/mach_host.srv
@@ -233,6 +239,7 @@ libkernel_a_SOURCES += \
libkernel_a_SOURCES += \
vm/memory_object_proxy.c \
+ vm/memory_object_proxy.h \
vm/memory_object.c \
vm/memory_object.h \
vm/pmap.h \
@@ -287,7 +294,6 @@ libkernel_a_SOURCES += \
device/device_types_kernel.h \
device/ds_routines.c \
device/ds_routines.h \
- device/errno.h \
device/if_ether.h \
device/if_hdr.h \
device/io_req.h \
@@ -348,6 +354,7 @@ include_mach_HEADERS = \
include/mach/exc.defs \
include/mach/mach.defs \
include/mach/mach4.defs \
+ include/mach/gnumach.defs \
include/mach/mach_host.defs \
include/mach/mach_port.defs \
include/mach/mach_types.defs \
@@ -390,6 +397,7 @@ include_mach_HEADERS = \
include/mach/time_value.h \
include/mach/version.h \
include/mach/vm_attributes.h \
+ include/mach/vm_cache_statistics.h \
include/mach/vm_inherit.h \
include/mach/vm_param.h \
include/mach/vm_prot.h \
@@ -406,7 +414,7 @@ include_mach_eXec_HEADERS = \
# mach-debug-headers:= $(addprefix mach_debug/, hash_info.h ipc_info.h \
# mach_debug.defs mach_debug_types.defs mach_debug_types.h \
-# pc_info.h vm_info.h zone_info.h)
+# pc_info.h vm_info.h slab_info.h)
# Other headers for the distribution. We don't install these, because the
# GNU C library has correct versions for users to use.
@@ -485,6 +493,7 @@ nodist_libkernel_a_SOURCES += \
nodist_lib_dep_tr_for_defs_a_SOURCES += \
kern/mach.server.defs.c \
kern/mach4.server.defs.c \
+ kern/gnumach.server.defs.c \
kern/mach_debug.server.defs.c \
kern/mach_host.server.defs.c
nodist_libkernel_a_SOURCES += \
@@ -494,6 +503,9 @@ nodist_libkernel_a_SOURCES += \
kern/mach4.server.h \
kern/mach4.server.c \
kern/mach4.server.msgids \
+ kern/gnumach.server.h \
+ kern/gnumach.server.c \
+ kern/gnumach.server.msgids \
kern/mach_debug.server.h \
kern/mach_debug.server.c \
kern/mach_debug.server.msgids \
@@ -502,6 +514,7 @@ nodist_libkernel_a_SOURCES += \
kern/mach_host.server.msgids
# kern/mach.server.defs
# kern/mach4.server.defs
+# kern/gnumach.server.defs
# kern/mach_debug.server.defs
# kern/mach_host.server.defs
@@ -509,8 +522,8 @@ nodist_libkernel_a_SOURCES += \
MOSTLYCLEANFILES += \
gnumach.msgids
gnumach.msgids: $(filter %.msgids,$(nodist_libkernel_a_SOURCES))
- cat $^ > $@.new
- mv $@.new $@
+ $(AM_V_at) cat $^ > $@.new
+ $(AM_V_GEN) mv $@.new $@
# `exec_' prefix, so that we don't try to build that file during when running
# `make install-data', as it may fail there, but isn't needed there either.
exec_msgidsdir = $(datadir)/msgids
diff --git a/Makerules.am b/Makerules.am
index 37d383ae..b1f17d12 100644
--- a/Makerules.am
+++ b/Makerules.am
@@ -1,6 +1,6 @@
# Makerules: how to do some things.
-# Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+# Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.
# Permission to use, copy, modify and distribute this software and its
# documentation is hereby granted, provided that both the copyright
@@ -20,11 +20,11 @@
EXTRA_DIST += \
gensym.awk
%.symc: %.sym gensym.awk
- $(AWK) -f $(word 2,$^) $< > $@
+ $(AWK_V) $(AWK) -f $(word 2,$^) $< > $@
%.symc.o: %.symc
- $(COMPILE) -S -x c -o $@ $<
+ $(AM_V_CC) $(COMPILE) -S -x c -o $@ $<
%.h: %.symc.o
- sed < $< > $@ \
+ $(AM_V_GEN) sed < $< > $@ \
-e 's/^[^*].*$$//' \
-e 's/^[*]/#define/' \
-e 's/mAgIc[^-0-9]*//'
@@ -37,7 +37,7 @@ include Makerules.mig.am
#
%.gz: %
- $(GZIP) -9 < $< > $@
+ $(GZIP_V) $(GZIP) -9 < $< > $@
#
# strip files.
diff --git a/Makerules.mig.am b/Makerules.mig.am
index b3f76da2..30609846 100644
--- a/Makerules.mig.am
+++ b/Makerules.mig.am
@@ -1,6 +1,6 @@
# Makerules.mig: how to do some MIG-related things.
-# Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+# Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
@@ -72,20 +72,20 @@ lib_dep_tr_for_defs_a_CPPFLAGS = $(AM_CPPFLAGS) \
-E
%.server.defs.c: %.srv
- rm -f $@
- cp -p $< $@
+ $(AM_V_at) rm -f $@
+ $(AM_V_GEN) cp -p $< $@
%.user.defs.c: %.cli
- rm -f $@
- cp -p $< $@
+ $(AM_V_at) rm -f $@
+ $(AM_V_GEN) cp -p $< $@
%.server.h %.server.c %.server.msgids: lib_dep_tr_for_defs_a-%.server.defs.$(OBJEXT)
- $(MIGCOM) $(MIGCOMFLAGS) $(MIGCOMSFLAGS) \
- -sheader $*.server.h -server $*.server.c \
- -list $*.server.msgids \
+ $(MIGCOM_V) $(MIGCOM) $(MIGCOMFLAGS) $(MIGCOMSFLAGS) \
+ -sheader $*.server.h -server $*.server.c \
+ -list $*.server.msgids \
< $<
%.user.h %.user.c %.user.msgids: lib_dep_tr_for_defs_a-%.user.defs.$(OBJEXT)
- $(MIGCOM) $(MIGCOMFLAGS) $(MIGCOMUFLAGS) \
- -user $*.user.c -header $*.user.h \
- -list $*.user.msgids \
+ $(MIGCOM_V) $(MIGCOM) $(MIGCOMFLAGS) $(MIGCOMUFLAGS) \
+ -user $*.user.c -header $*.user.h \
+ -list $*.user.msgids \
< $<
# This is how it should be done, but this is not integrated into GNU Automake
diff --git a/configfrag.ac b/configfrag.ac
index 1be36cb8..57146c93 100644
--- a/configfrag.ac
+++ b/configfrag.ac
@@ -102,6 +102,12 @@ AC_DEFINE([STAT_TIME], [1], [STAT_TIME])
# Kernel tracing.
AC_DEFINE([XPR_DEBUG], [1], [XPR_DEBUG])
+
+# Slab allocator debugging facilities.
+AC_DEFINE([SLAB_VERIFY], [0], [SLAB_VERIFY])
+
+# Enable the CPU pool layer in the slab allocator.
+AC_DEFINE([SLAB_USE_CPU_POOLS], [0], [SLAB_USE_CPU_POOLS])
#
# Options.
diff --git a/configure.ac b/configure.ac
index 50ec6b40..3a7d3be6 100644
--- a/configure.ac
+++ b/configure.ac
@@ -32,6 +32,9 @@ dnl Do not clutter the main build directory.
dnl We require GNU make.
[-Wall -Wno-portability]
)
+
+m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([no])],
+ [AC_SUBST([AM_DEFAULT_VERBOSITY], [1])])
#
# Deduce the architecture we're building for.
@@ -149,7 +152,8 @@ m4_include([linux/configfrag.ac])
[ssp_possible=yes]
AC_MSG_CHECKING([whether the compiler accepts `-fstack-protector'])
# Is this a reliable test case?
-AC_LANG_CONFTEST([[void foo (void) { volatile char a[8]; a[3]; }]])
+AC_LANG_CONFTEST(
+ [AC_LANG_SOURCE([[void foo (void) { volatile char a[8]; a[3]; }]])])
[# `$CC -c -o ...' might not be portable. But, oh, well... Is calling
# `ac_compile' like this correct, after all?
if eval "$ac_compile -S -fstack-protector -o conftest.s" 2> /dev/null; then]
diff --git a/ddb/db_examine.c b/ddb/db_examine.c
index 2df261f5..96c5eee1 100644
--- a/ddb/db_examine.c
+++ b/ddb/db_examine.c
@@ -480,7 +480,7 @@ db_xcdump(addr, size, count, task)
if (!DB_CHECK_ACCESS(next_page_addr, sizeof(int), task))
bcount = next_page_addr - addr;
}
- db_read_bytes((char *)addr, bcount, data, task);
+ db_read_bytes(addr, bcount, data, task);
for (i = 0; i < bcount && off != 0; i += size) {
if (i % 4 == 0)
db_printf(" ");
diff --git a/ddb/db_macro.c b/ddb/db_macro.c
index e372c236..43bb5837 100644
--- a/ddb/db_macro.c
+++ b/ddb/db_macro.c
@@ -165,7 +165,7 @@ db_exec_macro(name)
return(0);
}
-int
+long
/* ARGSUSED */
db_arg_variable(vp, valuep, flag, ap)
struct db_variable *vp;
diff --git a/ddb/db_macro.h b/ddb/db_macro.h
index 12ed16e4..da5626f9 100644
--- a/ddb/db_macro.h
+++ b/ddb/db_macro.h
@@ -32,7 +32,7 @@ extern void db_show_macro (void);
extern int db_exec_macro (char *name);
-extern int db_arg_variable (
+extern long db_arg_variable (
struct db_variable *vp,
db_expr_t *valuep,
int flag,
diff --git a/ddb/db_output.c b/ddb/db_output.c
index 57d6856d..3ea2caac 100644
--- a/ddb/db_output.c
+++ b/ddb/db_output.c
@@ -213,7 +213,7 @@ db_printf(const char *fmt, ...)
db_printf_enter(); /* optional multiP serialization */
#endif
va_start(listp, fmt);
- _doprnt(fmt, &listp, db_id_putc, db_radix, 0);
+ _doprnt(fmt, listp, db_id_putc, db_radix, 0);
va_end(listp);
}
@@ -225,7 +225,7 @@ kdbprintf(const char *fmt, ...)
{
va_list listp;
va_start(listp, fmt);
- _doprnt(fmt, &listp, db_id_putc, db_radix, 0);
+ _doprnt(fmt, listp, db_id_putc, db_radix, 0);
va_end(listp);
}
diff --git a/ddb/db_output.h b/ddb/db_output.h
index 3203e307..1159c6ba 100644
--- a/ddb/db_output.h
+++ b/ddb/db_output.h
@@ -35,6 +35,6 @@
extern void db_force_whitespace(void);
extern int db_print_position(void);
extern void db_end_line(void);
-extern void db_printf( const char *fmt, ...);
-extern void db_putchar(int c);
-
+extern void db_printf(const char *fmt, ...);
+extern void db_putchar(int c);
+extern void kdbprintf(const char *fmt, ...);
diff --git a/ddb/db_sym.c b/ddb/db_sym.c
index dd721384..5c5f7006 100644
--- a/ddb/db_sym.c
+++ b/ddb/db_sym.c
@@ -445,7 +445,7 @@ db_symbol_values(stab, sym, namep, valuep)
* not accept symbols whose value is zero (and use plain hex).
*/
-unsigned int db_maxoff = 0x4000;
+unsigned long db_maxoff = 0x4000;
void
db_task_printsym(off, strategy, task)
diff --git a/ddb/db_task_thread.c b/ddb/db_task_thread.c
index 37629425..1146223b 100644
--- a/ddb/db_task_thread.c
+++ b/ddb/db_task_thread.c
@@ -245,7 +245,7 @@ db_init_default_thread(void)
* in the command line
*/
/* ARGSUSED */
-int
+long
db_set_default_thread(vp, valuep, flag)
struct db_variable *vp;
db_expr_t *valuep;
@@ -270,7 +270,7 @@ db_set_default_thread(vp, valuep, flag)
/*
* convert $taskXXX[.YYY] type DDB variable to task or thread address
*/
-int
+long
db_get_task_thread(vp, valuep, flag, ap)
struct db_variable *vp;
db_expr_t *valuep;
diff --git a/ddb/db_variables.h b/ddb/db_variables.h
index c01a5e25..5249d18c 100644
--- a/ddb/db_variables.h
+++ b/ddb/db_variables.h
@@ -42,7 +42,7 @@ struct db_variable {
char *name; /* Name of variable */
db_expr_t *valuep; /* pointer to value of variable */
/* function to call when reading/writing */
- int (*fcn)(struct db_variable *, db_expr_t *, int, db_var_aux_param_t);
+ long (*fcn)(struct db_variable *, db_expr_t *, int, db_var_aux_param_t);
short min_level; /* number of minimum suffix levels */
short max_level; /* number of maximum suffix levels */
short low; /* low value of level 1 suffix */
@@ -50,7 +50,7 @@ struct db_variable {
#define DB_VAR_GET 0
#define DB_VAR_SET 1
};
-#define FCN_NULL ((int (*)())0)
+#define FCN_NULL ((long (*)())0)
#define DB_VAR_LEVEL 3 /* maximum number of suffix level */
diff --git a/device/cons.c b/device/cons.c
index e3e95ffb..f26f22c5 100644
--- a/device/cons.c
+++ b/device/cons.c
@@ -22,22 +22,10 @@
#include <string.h>
#include <kern/debug.h>
-#ifdef MACH_KERNEL
#include <sys/types.h>
#include <device/conf.h>
#include <mach/boolean.h>
#include <device/cons.h>
-#else
-#include <sys/param.h>
-#include <sys/user.h>
-#include <sys/systm.h>
-#include <sys/buf.h>
-#include <sys/ioctl.h>
-#include <sys/tty.h>
-#include <sys/file.h>
-#include <sys/conf.h>
-#include <hpdev/cons.h>
-#endif
#ifdef MACH_KMSG
#include <device/io_req.h>
@@ -46,9 +34,6 @@
static int cn_inited = 0;
static struct consdev *cn_tab = 0; /* physical console device info */
-#ifndef MACH_KERNEL
-static struct tty *constty = 0; /* virtual console output device */
-#endif
/*
* ROM getc/putc primitives.
@@ -76,10 +61,8 @@ void
cninit()
{
struct consdev *cp;
-#ifdef MACH_KERNEL
dev_ops_t cn_ops;
int x;
-#endif
if (cn_inited)
return;
@@ -103,7 +86,6 @@ cninit()
* Initialize as console
*/
(*cp->cn_init)(cp);
-#ifdef MACH_KERNEL
/*
* Look up its dev_ops pointer in the device table and
* place it in the device indirection table.
@@ -111,7 +93,6 @@ cninit()
if (dev_name_lookup(cp->cn_name, &cn_ops, &x) == FALSE)
panic("cninit: dev_name_lookup failed");
dev_set_indirection("console", cn_ops, minor(cp->cn_dev));
-#endif
#if CONSBUFSIZE > 0
/*
* Now that the console is initialized, dump any chars in
@@ -134,97 +115,9 @@ cninit()
/*
* No console device found, not a problem for BSD, fatal for Mach
*/
-#ifdef MACH_KERNEL
panic("can't find a console device");
-#endif
}
-#ifndef MACH_KERNEL
-cnopen(dev, flag)
- dev_t dev;
-{
- if (cn_tab == NULL)
- return(0);
- dev = cn_tab->cn_dev;
- return ((*cdevsw[major(dev)].d_open)(dev, flag));
-}
-
-cnclose(dev, flag)
- dev_t dev;
-{
- if (cn_tab == NULL)
- return(0);
- dev = cn_tab->cn_dev;
- return ((*cdevsw[major(dev)].d_close)(dev, flag));
-}
-
-cnread(dev, uio)
- dev_t dev;
- struct uio *uio;
-{
- if (cn_tab == NULL)
- return(0);
- dev = cn_tab->cn_dev;
- return ((*cdevsw[major(dev)].d_read)(dev, uio));
-}
-
-cnwrite(dev, uio)
- dev_t dev;
- struct uio *uio;
-{
- if (cn_tab == NULL)
- return(0);
- dev = cn_tab->cn_dev;
- return ((*cdevsw[major(dev)].d_write)(dev, uio));
-}
-
-cnioctl(dev, cmd, data, flag)
- dev_t dev;
- caddr_t data;
-{
- if (cn_tab == NULL)
- return(0);
- /*
- * Superuser can always use this to wrest control of console
- * output from the "virtual" console.
- */
- if (cmd == TIOCCONS && constty) {
- if (!suser())
- return(EPERM);
- constty = NULL;
- return(0);
- }
- dev = cn_tab->cn_dev;
- return ((*cdevsw[major(dev)].d_ioctl)(dev, cmd, data, flag));
-}
-
-cnselect(dev, rw)
- dev_t dev;
- int rw;
-{
- if (cn_tab == NULL)
- return(1);
- return(ttselect(cn_tab->cn_dev, rw));
-}
-
-#ifndef hp300
-/*
- * XXX Should go away when the new CIO MUX driver is in place
- */
-#define d_control d_mmap
-cncontrol(dev, cmd, data)
- dev_t dev;
- int cmd;
- int data;
-{
- if (cn_tab == NULL)
- return(0);
- dev = cn_tab->cn_dev;
- return((*cdevsw[major(dev)].d_control)(dev, cmd, data));
-}
-#undef d_control
-#endif
-#endif
int
cngetc()
@@ -236,7 +129,6 @@ cngetc()
return (0);
}
-#ifdef MACH_KERNEL
int
cnmaygetc()
{
@@ -246,7 +138,6 @@ cnmaygetc()
return ((*romgetc)(0));
return (0);
}
-#endif
void
cnputc(c)
diff --git a/device/cons.h b/device/cons.h
index a6b04fff..6a0ae850 100644
--- a/device/cons.h
+++ b/device/cons.h
@@ -21,16 +21,16 @@
* Utah $Hdr: cons.h 1.10 94/12/14$
*/
+#ifndef _DEVICE_CONS_H
+#define _DEVICE_CONS_H
#include <sys/types.h>
struct consdev {
-#ifdef MACH_KERNEL
char *cn_name; /* name of device in dev_name_list */
-#endif
- int (*cn_probe)(); /* probe hardware and fill in consdev info */
- int (*cn_init)(); /* turn on as console */
- int (*cn_getc)(); /* kernel getchar interface */
- int (*cn_putc)(); /* kernel putchar interface */
+ int (*cn_probe)(struct consdev *cp); /* probe hardware and fill in consdev info */
+ int (*cn_init)(struct consdev *cp); /* turn on as console */
+ int (*cn_getc)(dev_t dev, int wait); /* kernel getchar interface */
+ int (*cn_putc)(dev_t dev, int c); /* kernel putchar interface */
dev_t cn_dev; /* major/minor of device */
short cn_pri; /* pecking order; the higher the better */
};
@@ -57,3 +57,4 @@ extern int cngetc(void);
extern int cnmaygetc(void);
extern void cnputc(char);
+#endif /* _DEVICE_CONS_H */
diff --git a/device/dev_lookup.c b/device/dev_lookup.c
index 2391e8d7..98a2d02c 100644
--- a/device/dev_lookup.c
+++ b/device/dev_lookup.c
@@ -32,7 +32,7 @@
#include <mach/vm_param.h>
#include <kern/queue.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <device/device_types.h>
#include <device/dev_hdr.h>
@@ -62,7 +62,7 @@ queue_head_t dev_number_hash_table[NDEVHASH];
decl_simple_lock_data(,
dev_number_lock)
-zone_t dev_hdr_zone;
+struct kmem_cache dev_hdr_cache;
/*
* Enter device in the number lookup table.
@@ -151,7 +151,7 @@ device_lookup(name)
simple_unlock(&dev_number_lock);
- new_device = (mach_device_t) zalloc(dev_hdr_zone);
+ new_device = (mach_device_t) kmem_cache_alloc(&dev_hdr_cache);
simple_lock_init(&new_device->ref_lock);
new_device->ref_count = 1;
simple_lock_init(&new_device->lock);
@@ -187,7 +187,7 @@ device_lookup(name)
simple_unlock(&dev_number_lock);
if (new_device != MACH_DEVICE_NULL)
- zfree(dev_hdr_zone, (vm_offset_t)new_device);
+ kmem_cache_free(&dev_hdr_cache, (vm_offset_t)new_device);
}
return (device);
@@ -233,7 +233,7 @@ mach_device_deallocate(device)
simple_unlock(&device->ref_lock);
simple_unlock(&dev_number_lock);
- zfree(dev_hdr_zone, (vm_offset_t)device);
+ kmem_cache_free(&dev_hdr_cache, (vm_offset_t)device);
}
/*
@@ -376,9 +376,6 @@ dev_lookup_init()
simple_lock_init(&dev_port_lock);
- dev_hdr_zone = zinit(sizeof(struct mach_device), 0,
- sizeof(struct mach_device) * NDEVICES,
- PAGE_SIZE,
- FALSE,
- "open device entry");
+ kmem_cache_init(&dev_hdr_cache, "mach_device",
+ sizeof(struct mach_device), 0, NULL, NULL, NULL, 0);
}
diff --git a/device/dev_pager.c b/device/dev_pager.c
index 224be850..e0ca2c76 100644
--- a/device/dev_pager.c
+++ b/device/dev_pager.c
@@ -44,8 +44,7 @@
#include <kern/debug.h>
#include <kern/printf.h>
#include <kern/queue.h>
-#include <kern/zalloc.h>
-#include <kern/kalloc.h>
+#include <kern/slab.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
@@ -55,6 +54,7 @@
#include <device/ds_routines.h>
#include <device/dev_hdr.h>
#include <device/io_req.h>
+#include <device/memory_object_reply.user.h>
extern vm_offset_t block_io_mmap(); /* dummy routine to allow
mmap for block devices */
@@ -125,7 +125,7 @@ typedef struct dev_pager *dev_pager_t;
#define DEV_PAGER_NULL ((dev_pager_t)0)
-zone_t dev_pager_zone;
+struct kmem_cache dev_pager_cache;
void dev_pager_reference(register dev_pager_t ds)
{
@@ -143,7 +143,7 @@ void dev_pager_deallocate(register dev_pager_t ds)
}
simple_unlock(&ds->lock);
- zfree(dev_pager_zone, (vm_offset_t)ds);
+ kmem_cache_free(&dev_pager_cache, (vm_offset_t)ds);
}
/*
@@ -160,12 +160,12 @@ struct dev_pager_entry {
typedef struct dev_pager_entry *dev_pager_entry_t;
queue_head_t dev_pager_hashtable[DEV_PAGER_HASH_COUNT];
-zone_t dev_pager_hash_zone;
+struct kmem_cache dev_pager_hash_cache;
decl_simple_lock_data(,
dev_pager_hash_lock)
#define dev_pager_hash(name_port) \
- (((natural_t)(name_port) & 0xffffff) % DEV_PAGER_HASH_COUNT)
+ (((vm_offset_t)(name_port) & 0xffffff) % DEV_PAGER_HASH_COUNT)
void dev_pager_hash_init(void)
{
@@ -173,13 +173,8 @@ void dev_pager_hash_init(void)
register vm_size_t size;
size = sizeof(struct dev_pager_entry);
- dev_pager_hash_zone = zinit(
- size,
- 0,
- size * 1000,
- PAGE_SIZE,
- FALSE,
- "dev_pager port hash");
+ kmem_cache_init(&dev_pager_hash_cache, "dev_pager_entry", size, 0,
+ NULL, NULL, NULL, 0);
for (i = 0; i < DEV_PAGER_HASH_COUNT; i++)
queue_init(&dev_pager_hashtable[i]);
simple_lock_init(&dev_pager_hash_lock);
@@ -191,7 +186,7 @@ void dev_pager_hash_insert(
{
register dev_pager_entry_t new_entry;
- new_entry = (dev_pager_entry_t) zalloc(dev_pager_hash_zone);
+ new_entry = (dev_pager_entry_t) kmem_cache_alloc(&dev_pager_hash_cache);
new_entry->name = name_port;
new_entry->pager_rec = rec;
@@ -219,7 +214,7 @@ void dev_pager_hash_delete(ipc_port_t name_port)
}
simple_unlock(&dev_pager_hash_lock);
if (entry)
- zfree(dev_pager_hash_zone, (vm_offset_t)entry);
+ kmem_cache_free(&dev_pager_hash_cache, (vm_offset_t)entry);
}
dev_pager_t dev_pager_hash_lookup(ipc_port_t name_port)
@@ -272,7 +267,7 @@ kern_return_t device_pager_setup(
return (D_SUCCESS);
}
- d = (dev_pager_t) zalloc(dev_pager_zone);
+ d = (dev_pager_t) kmem_cache_alloc(&dev_pager_cache);
if (d == DEV_PAGER_NULL)
return (KERN_RESOURCE_SHORTAGE);
@@ -341,7 +336,7 @@ kern_return_t device_pager_data_request(
#endif /* lint */
if (device_pager_debug)
- printf("(device_pager)data_request: pager=%p, offset=0x%x, length=0x%x\n",
+ printf("(device_pager)data_request: pager=%p, offset=0x%lx, length=0x%x\n",
pager, offset, length);
ds = dev_pager_hash_lookup((ipc_port_t)pager);
@@ -725,15 +720,11 @@ void device_pager_init(void)
register vm_size_t size;
/*
- * Initialize zone of paging structures.
+ * Initialize cache of paging structures.
*/
size = sizeof(struct dev_pager);
- dev_pager_zone = zinit(size,
- 0,
- (vm_size_t) size * 1000,
- PAGE_SIZE,
- FALSE,
- "device pager structures");
+ kmem_cache_init(&dev_pager_cache, "dev_pager", size, 0,
+ NULL, NULL, NULL, 0);
/*
* Initialize the name port hashing stuff.
diff --git a/device/device.srv b/device/device.srv
index 06aa0be3..f63813f9 100644
--- a/device/device.srv
+++ b/device/device.srv
@@ -24,6 +24,4 @@
#define KERNEL_SERVER 1
-simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
-
#include <device/device.defs>
diff --git a/device/ds_routines.c b/device/ds_routines.c
index 93569f28..68589dee 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -73,7 +73,7 @@
#include <kern/debug.h>
#include <kern/printf.h>
#include <kern/queue.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
@@ -99,6 +99,7 @@
extern struct device_emulation_ops linux_block_emulation_ops;
#ifdef CONFIG_INET
extern struct device_emulation_ops linux_net_emulation_ops;
+extern void free_skbuffs ();
#ifdef CONFIG_PCMCIA
extern struct device_emulation_ops linux_pcmcia_emulation_ops;
#endif
@@ -129,6 +130,9 @@ static struct device_emulation_ops *emulation_list[] =
&mach_device_emulation_ops,
};
+static struct vm_map device_io_map_store;
+vm_map_t device_io_map = &device_io_map_store;
+
#define NUM_EMULATION (sizeof (emulation_list) / sizeof (emulation_list[0]))
io_return_t
@@ -852,7 +856,7 @@ device_write_get(ior, wait)
*/
if (ior->io_op & IO_INBAND) {
assert(ior->io_count <= sizeof (io_buf_ptr_inband_t));
- new_addr = zalloc(io_inband_zone);
+ new_addr = kmem_cache_alloc(&io_inband_cache);
memcpy((void*)new_addr, ior->io_data, ior->io_count);
ior->io_data = (io_buf_ptr_t)new_addr;
ior->io_alloc_size = sizeof (io_buf_ptr_inband_t);
@@ -932,7 +936,7 @@ device_write_dealloc(ior)
* Inband case.
*/
if (ior->io_op & IO_INBAND) {
- zfree(io_inband_zone, (vm_offset_t)ior->io_data);
+ kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data);
return (TRUE);
}
@@ -1242,7 +1246,7 @@ kern_return_t device_read_alloc(ior, size)
return (KERN_SUCCESS);
if (ior->io_op & IO_INBAND) {
- ior->io_data = (io_buf_ptr_t) zalloc(io_inband_zone);
+ ior->io_data = (io_buf_ptr_t) kmem_cache_alloc(&io_inband_cache);
ior->io_alloc_size = sizeof(io_buf_ptr_inband_t);
} else {
size = round_page(size);
@@ -1335,7 +1339,7 @@ boolean_t ds_read_done(ior)
if (ior->io_count != 0) {
if (ior->io_op & IO_INBAND) {
if (ior->io_alloc_size > 0)
- zfree(io_inband_zone, (vm_offset_t)ior->io_data);
+ kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data);
} else {
register vm_offset_t end_alloc;
@@ -1445,7 +1449,7 @@ static void
ds_no_senders(notification)
mach_no_senders_notification_t *notification;
{
- printf("ds_no_senders called! device_port=0x%x count=%d\n",
+ printf("ds_no_senders called! device_port=0x%lx count=%d\n",
notification->not_header.msgh_remote_port,
notification->not_count);
}
@@ -1548,11 +1552,9 @@ void mach_device_init()
queue_init(&io_done_list);
simple_lock_init(&io_done_list_lock);
- device_io_map = kmem_suballoc(kernel_map,
- &device_io_min,
- &device_io_max,
- DEVICE_IO_MAP_SIZE,
- FALSE);
+ kmem_submap(device_io_map, kernel_map, &device_io_min, &device_io_max,
+ DEVICE_IO_MAP_SIZE, FALSE);
+
/*
* If the kernel receives many device_write requests, the
* device_io_map might run out of space. To prevent
@@ -1572,11 +1574,8 @@ void mach_device_init()
*/
device_io_map->wait_for_space = TRUE;
- io_inband_zone = zinit(sizeof(io_buf_ptr_inband_t), 0,
- 1000 * sizeof(io_buf_ptr_inband_t),
- 10 * sizeof(io_buf_ptr_inband_t),
- FALSE,
- "io inband read buffers");
+ kmem_cache_init(&io_inband_cache, "io_buf_ptr_inband",
+ sizeof(io_buf_ptr_inband_t), 0, NULL, NULL, NULL, 0);
mach_device_trap_init();
}
@@ -1612,7 +1611,7 @@ void iowait(ior)
*/
#define IOTRAP_REQSIZE 2048
-zone_t io_trap_zone;
+struct kmem_cache io_trap_cache;
/*
* Initialization. Called from mach_device_init().
@@ -1620,24 +1619,21 @@ zone_t io_trap_zone;
static void
mach_device_trap_init(void)
{
- io_trap_zone = zinit(IOTRAP_REQSIZE, 0,
- 256 * IOTRAP_REQSIZE,
- 16 * IOTRAP_REQSIZE,
- FALSE,
- "wired device trap buffers");
+ kmem_cache_init(&io_trap_cache, "io_req", IOTRAP_REQSIZE, 0,
+ NULL, NULL, NULL, 0);
}
/*
* Allocate an io_req_t.
- * Currently zalloc's from io_trap_zone.
+ * Currently allocates from io_trap_cache.
*
- * Could have lists of different size zones.
+ * Could have lists of different size caches.
* Could call a device-specific routine.
*/
io_req_t
ds_trap_req_alloc(mach_device_t device, vm_size_t data_size)
{
- return (io_req_t) zalloc(io_trap_zone);
+ return (io_req_t) kmem_cache_alloc(&io_trap_cache);
}
/*
@@ -1653,7 +1649,7 @@ ds_trap_write_done(io_req_t ior)
/*
* Should look at reply port and maybe send a message.
*/
- zfree(io_trap_zone, (vm_offset_t) ior);
+ kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
/*
* Give up device reference from ds_write_trap.
@@ -1729,7 +1725,7 @@ device_write_trap (mach_device_t device, dev_mode_t mode,
*/
mach_device_deallocate(device);
- zfree(io_trap_zone, (vm_offset_t) ior);
+ kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
return (result);
}
@@ -1820,7 +1816,7 @@ device_writev_trap (mach_device_t device, dev_mode_t mode,
*/
mach_device_deallocate(device);
- zfree(io_trap_zone, (vm_offset_t) ior);
+ kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
return (result);
}
diff --git a/device/ds_routines.h b/device/ds_routines.h
index e1f6aadb..a00a12d5 100644
--- a/device/ds_routines.h
+++ b/device/ds_routines.h
@@ -41,7 +41,9 @@
/*
* Map for device IO memory.
*/
-vm_map_t device_io_map;
+extern vm_map_t device_io_map;
+
+extern queue_head_t io_done_list;
kern_return_t device_read_alloc(io_req_t, vm_size_t);
kern_return_t device_write_get(io_req_t, boolean_t *);
diff --git a/device/errno.h b/device/errno.h
deleted file mode 100644
index e65aa984..00000000
--- a/device/errno.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991 Carnegie Mellon University
- * All Rights Reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- * Author: David B. Golub, Carnegie Mellon University
- * Date: 8/89
- *
- * Old names for new error codes, for compatibility.
- */
-
-#ifndef _ERRNO_
-#define _ERRNO_
-
-#include <device/device_types.h> /* the real error names */
-
-#define EIO D_IO_ERROR
-#define ENXIO D_NO_SUCH_DEVICE
-#define EINVAL D_INVALID_SIZE /* XXX */
-#define EBUSY D_ALREADY_OPEN
-#define ENOTTY D_INVALID_OPERATION
-#define ENOMEM D_NO_MEMORY
-
-#endif /* _ERRNO_ */
diff --git a/device/io_req.h b/device/io_req.h
index 162524d7..65e23e60 100644
--- a/device/io_req.h
+++ b/device/io_req.h
@@ -35,6 +35,7 @@
#include <mach/port.h>
#include <mach/message.h>
#include <mach/vm_param.h>
+#include <kern/slab.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_page.h>
@@ -124,7 +125,7 @@ struct io_req {
void iodone(io_req_t);
/*
- * Macros to allocate and free IORs - will convert to zones later.
+ * Macros to allocate and free IORs - will convert to caches later.
*/
#define io_req_alloc(ior,size) \
MACRO_BEGIN \
@@ -136,6 +137,6 @@ void iodone(io_req_t);
(kfree((vm_offset_t)(ior), sizeof(struct io_req)))
-zone_t io_inband_zone; /* for inband reads */
+struct kmem_cache io_inband_cache; /* for inband reads */
#endif /* _IO_REQ_ */
diff --git a/device/kmsg.h b/device/kmsg.h
index adfc9453..b8c1f366 100644
--- a/device/kmsg.h
+++ b/device/kmsg.h
@@ -1,7 +1,6 @@
#ifndef _DEVICE_KMSG_H_
#define _DEVICE_KMSG_H_ 1
-#ifdef MACH_KERNEL
#include <sys/types.h>
@@ -15,6 +14,5 @@ io_return_t kmsggetstat (dev_t dev, int flavor,
int *data, unsigned int *count);
void kmsg_putchar (int c);
-#endif /* MACH_KERNEL */
#endif /* !_DEVICE_KMSG_H_ */
diff --git a/device/net_io.c b/device/net_io.c
index 1db9bca9..4ebf9964 100644
--- a/device/net_io.c
+++ b/device/net_io.c
@@ -61,6 +61,7 @@
#include <kern/printf.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
+#include <kern/slab.h>
#include <kern/thread.h>
#include <machine/machspl.h>
@@ -302,7 +303,7 @@ struct net_rcv_port {
};
typedef struct net_rcv_port *net_rcv_port_t;
-zone_t net_rcv_zone; /* zone of net_rcv_port structs */
+struct kmem_cache net_rcv_cache; /* cache of net_rcv_port structs */
#define NET_HASH_SIZE 256
@@ -324,7 +325,7 @@ struct net_hash_entry {
};
typedef struct net_hash_entry *net_hash_entry_t;
-zone_t net_hash_entry_zone;
+struct kmem_cache net_hash_entry_cache;
/*
* This structure represents a packet filter with multiple sessions.
@@ -393,7 +394,7 @@ int net_add_q_info (ipc_port_t rcv_port);
int bpf_match (
net_hash_header_t hash,
int n_keys,
- unsigned int *keys,
+ unsigned long *keys,
net_hash_entry_t **hash_headpp,
net_hash_entry_t *entpp);
@@ -1195,7 +1196,7 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
* If there is no match instruction, we allocate
* a normal packet filter structure.
*/
- my_infp = (net_rcv_port_t) zalloc(net_rcv_zone);
+ my_infp = (net_rcv_port_t) kmem_cache_alloc(&net_rcv_cache);
my_infp->rcv_port = rcv_port;
is_new_infp = TRUE;
} else {
@@ -1205,7 +1206,7 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
* a hash table to deal with them.
*/
my_infp = 0;
- hash_entp = (net_hash_entry_t) zalloc(net_hash_entry_zone);
+ hash_entp = (net_hash_entry_t) kmem_cache_alloc(&net_hash_entry_cache);
is_new_infp = FALSE;
}
@@ -1310,7 +1311,8 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
ipc_port_release_send(rcv_port);
if (match != 0)
- zfree (net_hash_entry_zone, (vm_offset_t)hash_entp);
+ kmem_cache_free(&net_hash_entry_cache,
+ (vm_offset_t)hash_entp);
rval = D_NO_MEMORY;
goto clean_and_return;
@@ -1526,20 +1528,12 @@ net_io_init()
register vm_size_t size;
size = sizeof(struct net_rcv_port);
- net_rcv_zone = zinit(size,
- 0,
- size * 1000,
- PAGE_SIZE,
- FALSE,
- "net_rcv_port");
+ kmem_cache_init(&net_rcv_cache, "net_rcv_port", size, 0,
+ NULL, NULL, NULL, 0);
size = sizeof(struct net_hash_entry);
- net_hash_entry_zone = zinit(size,
- 0,
- size * 100,
- PAGE_SIZE,
- FALSE,
- "net_hash_entry");
+ kmem_cache_init(&net_hash_entry_cache, "net_hash_entry", size, 0,
+ NULL, NULL, NULL, 0);
size = ikm_plus_overhead(sizeof(struct net_rcv_msg));
net_kmsg_size = round_page(size);
@@ -1644,7 +1638,7 @@ bpf_do_filter(infp, p, wirelen, header, hlen, hash_headpp, entpp)
register unsigned long A, X;
register int k;
- long mem[BPF_MEMWORDS];
+ unsigned long mem[BPF_MEMWORDS];
/* Generic pointer to either HEADER or P according to the specified offset. */
char *data = NULL;
@@ -1655,10 +1649,9 @@ bpf_do_filter(infp, p, wirelen, header, hlen, hash_headpp, entpp)
buflen = NET_RCV_MAX;
*entpp = 0; /* default */
-#ifdef lint
A = 0;
X = 0;
-#endif
+
for (; pc < pc_end; ++pc) {
switch (pc->code) {
@@ -2039,7 +2032,7 @@ int
bpf_match (hash, n_keys, keys, hash_headpp, entpp)
net_hash_header_t hash;
register int n_keys;
- register unsigned int *keys;
+ register unsigned long *keys;
net_hash_entry_t **hash_headpp, *entpp;
{
register net_hash_entry_t head, entp;
@@ -2168,7 +2161,7 @@ net_free_dead_infp (dead_infp)
nextfp = (net_rcv_port_t) queue_next(&infp->input);
ipc_port_release_send(infp->rcv_port);
net_del_q_info(infp->rcv_qlimit);
- zfree(net_rcv_zone, (vm_offset_t) infp);
+ kmem_cache_free(&net_rcv_cache, (vm_offset_t) infp);
}
}
@@ -2191,7 +2184,7 @@ net_free_dead_entp (dead_entp)
ipc_port_release_send(entp->rcv_port);
net_del_q_info(entp->rcv_qlimit);
- zfree(net_hash_entry_zone, (vm_offset_t) entp);
+ kmem_cache_free(&net_hash_entry_cache, (vm_offset_t) entp);
}
}
diff --git a/doc/.gitignore b/doc/.gitignore
new file mode 100644
index 00000000..829355ba
--- /dev/null
+++ b/doc/.gitignore
@@ -0,0 +1,4 @@
+/*.info
+/*.info-*
+/stamp-vti
+/version.texi
diff --git a/i386/Makefrag.am b/i386/Makefrag.am
index db58c9c6..3338e6e6 100644
--- a/i386/Makefrag.am
+++ b/i386/Makefrag.am
@@ -136,7 +136,6 @@ libkernel_a_SOURCES += \
i386/i386/vm_param.h \
i386/i386/vm_tuning.h \
i386/i386/xpr.h \
- i386/i386/zalloc.h \
i386/intel/pmap.c \
i386/intel/pmap.h \
i386/intel/read_fault.c \
@@ -206,9 +205,16 @@ EXTRA_DIST += \
i386/ldscript
if PLATFORM_at
gnumach_LINKFLAGS += \
- --defsym _START=0x100000 \
+ --defsym _START=0xC0100000 \
+ --defsym _START_MAP=0x100000 \
-T '$(srcdir)'/i386/ldscript
endif
+
+AM_CFLAGS += \
+ -mno-3dnow \
+ -mno-mmx \
+ -mno-sse \
+ -mno-sse2
#
# Installation.
diff --git a/i386/configfrag.ac b/i386/configfrag.ac
index e4ce97ef..1eaabcad 100644
--- a/i386/configfrag.ac
+++ b/i386/configfrag.ac
@@ -23,7 +23,9 @@ dnl USE OF THIS SOFTWARE.
# Some of the i386-specific code checks for these.
AC_DEFINE([__ELF__], [1], [__ELF__])
- AC_DEFINE([i386], [1], [i386])
+
+ # Determines the size of the CPU cache line.
+ AC_DEFINE([CPU_L1_SHIFT], [6], [CPU_L1_SHIFT])
[# Does the architecture provide machine-specific interfaces?
mach_machine_routines=1;;
diff --git a/i386/i386/.gitignore b/i386/i386/.gitignore
new file mode 100644
index 00000000..4520a2a1
--- /dev/null
+++ b/i386/i386/.gitignore
@@ -0,0 +1 @@
+/i386asm.h
diff --git a/i386/i386/db_interface.c b/i386/i386/db_interface.c
index d149adc5..66cc8b59 100644
--- a/i386/i386/db_interface.c
+++ b/i386/i386/db_interface.c
@@ -39,6 +39,7 @@
#include <i386/setjmp.h>
#include <i386/pmap.h>
#include <i386/proc_reg.h>
+#include <i386/locore.h>
#include "gdt.h"
#include "trap.h"
@@ -187,10 +188,10 @@ kdb_trap(
* instead of those at its call to KDB.
*/
struct int_regs {
- int edi;
- int esi;
- int ebp;
- int ebx;
+ long edi;
+ long esi;
+ long ebp;
+ long ebx;
struct i386_interrupt_state *is;
};
@@ -390,6 +391,8 @@ db_write_bytes(
oldmap1 = *ptep1;
*ptep1 |= INTEL_PTE_WRITE;
}
+ if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
+ set_cr4(get_cr4() & ~CR4_PGE);
flush_tlb();
}
@@ -404,6 +407,8 @@ db_write_bytes(
*ptep1 = oldmap1;
}
flush_tlb();
+ if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
+ set_cr4(get_cr4() | CR4_PGE);
}
}
diff --git a/i386/i386/db_interface.h b/i386/i386/db_interface.h
index a81c8eec..10a02e2b 100644
--- a/i386/i386/db_interface.h
+++ b/i386/i386/db_interface.h
@@ -62,6 +62,7 @@ extern void db_task_name (task_t task);
#define I386_DB_LEN_1 0
#define I386_DB_LEN_2 1
#define I386_DB_LEN_4 3
+#define I386_DB_LEN_8 2 /* For >= Pentium4 and Xen CPUID >= 15 only */
#define I386_DB_LOCAL 1
#define I386_DB_GLOBAL 2
diff --git a/i386/i386/db_machdep.h b/i386/i386/db_machdep.h
index 95e37971..1dba2cd8 100644
--- a/i386/i386/db_machdep.h
+++ b/i386/i386/db_machdep.h
@@ -39,7 +39,7 @@
#include <i386/trap.h>
typedef vm_offset_t db_addr_t; /* address - unsigned */
-typedef int db_expr_t; /* expression - signed */
+typedef long db_expr_t; /* expression - signed */
typedef struct i386_saved_state db_regs_t;
db_regs_t ddb_regs; /* register state */
diff --git a/i386/i386/db_trace.c b/i386/i386/db_trace.c
index c4019b59..4e3bea39 100644
--- a/i386/i386/db_trace.c
+++ b/i386/i386/db_trace.c
@@ -45,7 +45,7 @@
#include "trap.h"
-int
+long
db_i386_reg_value(
struct db_variable *vp,
db_expr_t *valuep,
@@ -56,22 +56,22 @@ db_i386_reg_value(
* Machine register set.
*/
struct db_variable db_regs[] = {
- { "cs", (int *)&ddb_regs.cs, db_i386_reg_value },
- { "ds", (int *)&ddb_regs.ds, db_i386_reg_value },
- { "es", (int *)&ddb_regs.es, db_i386_reg_value },
- { "fs", (int *)&ddb_regs.fs, db_i386_reg_value },
- { "gs", (int *)&ddb_regs.gs, db_i386_reg_value },
- { "ss", (int *)&ddb_regs.ss, db_i386_reg_value },
- { "eax",(int *)&ddb_regs.eax, db_i386_reg_value },
- { "ecx",(int *)&ddb_regs.ecx, db_i386_reg_value },
- { "edx",(int *)&ddb_regs.edx, db_i386_reg_value },
- { "ebx",(int *)&ddb_regs.ebx, db_i386_reg_value },
- { "esp",(int *)&ddb_regs.uesp,db_i386_reg_value },
- { "ebp",(int *)&ddb_regs.ebp, db_i386_reg_value },
- { "esi",(int *)&ddb_regs.esi, db_i386_reg_value },
- { "edi",(int *)&ddb_regs.edi, db_i386_reg_value },
- { "eip",(int *)&ddb_regs.eip, db_i386_reg_value },
- { "efl",(int *)&ddb_regs.efl, db_i386_reg_value },
+ { "cs", (long *)&ddb_regs.cs, db_i386_reg_value },
+ { "ds", (long *)&ddb_regs.ds, db_i386_reg_value },
+ { "es", (long *)&ddb_regs.es, db_i386_reg_value },
+ { "fs", (long *)&ddb_regs.fs, db_i386_reg_value },
+ { "gs", (long *)&ddb_regs.gs, db_i386_reg_value },
+ { "ss", (long *)&ddb_regs.ss, db_i386_reg_value },
+ { "eax",(long *)&ddb_regs.eax, db_i386_reg_value },
+ { "ecx",(long *)&ddb_regs.ecx, db_i386_reg_value },
+ { "edx",(long *)&ddb_regs.edx, db_i386_reg_value },
+ { "ebx",(long *)&ddb_regs.ebx, db_i386_reg_value },
+ { "esp",(long *)&ddb_regs.uesp,db_i386_reg_value },
+ { "ebp",(long *)&ddb_regs.ebp, db_i386_reg_value },
+ { "esi",(long *)&ddb_regs.esi, db_i386_reg_value },
+ { "edi",(long *)&ddb_regs.edi, db_i386_reg_value },
+ { "eip",(long *)&ddb_regs.eip, db_i386_reg_value },
+ { "efl",(long *)&ddb_regs.efl, db_i386_reg_value },
};
struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
@@ -82,8 +82,8 @@ struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
struct i386_frame {
struct i386_frame *f_frame;
- int f_retaddr;
- int f_arg0;
+ long f_retaddr;
+ long f_arg0;
};
#define TRAP 1
@@ -99,18 +99,18 @@ boolean_t db_trace_symbols_found = FALSE;
struct i386_kregs {
char *name;
- int offset;
+ long offset;
} i386_kregs[] = {
- { "ebx", (int)(&((struct i386_kernel_state *)0)->k_ebx) },
- { "esp", (int)(&((struct i386_kernel_state *)0)->k_esp) },
- { "ebp", (int)(&((struct i386_kernel_state *)0)->k_ebp) },
- { "edi", (int)(&((struct i386_kernel_state *)0)->k_edi) },
- { "esi", (int)(&((struct i386_kernel_state *)0)->k_esi) },
- { "eip", (int)(&((struct i386_kernel_state *)0)->k_eip) },
+ { "ebx", (long)(&((struct i386_kernel_state *)0)->k_ebx) },
+ { "esp", (long)(&((struct i386_kernel_state *)0)->k_esp) },
+ { "ebp", (long)(&((struct i386_kernel_state *)0)->k_ebp) },
+ { "edi", (long)(&((struct i386_kernel_state *)0)->k_edi) },
+ { "esi", (long)(&((struct i386_kernel_state *)0)->k_esi) },
+ { "eip", (long)(&((struct i386_kernel_state *)0)->k_eip) },
{ 0 },
};
-int *
+long *
db_lookup_i386_kreg(
char *name,
int *kregp)
@@ -119,19 +119,19 @@ db_lookup_i386_kreg(
for (kp = i386_kregs; kp->name; kp++) {
if (strcmp(name, kp->name) == 0)
- return (int *)((int)kregp + kp->offset);
+ return (long *)((long)kregp + kp->offset);
}
return 0;
}
-int
+long
db_i386_reg_value(
struct db_variable *vp,
db_expr_t *valuep,
int flag,
db_var_aux_param_t ap)
{
- int *dp = 0;
+ long *dp = 0;
db_expr_t null_reg = 0;
register thread_t thread = ap->thread;
extern unsigned int_stack_high;
@@ -153,15 +153,15 @@ db_i386_reg_value(
} else if ((thread->state & TH_SWAPPED) == 0 &&
thread->kernel_stack) {
dp = db_lookup_i386_kreg(vp->name,
- (int *)(STACK_IKS(thread->kernel_stack)));
+ (long *)(STACK_IKS(thread->kernel_stack)));
if (dp == 0)
dp = &null_reg;
} else if ((thread->state & TH_SWAPPED) &&
thread->swap_func != thread_exception_return) {
/*.....this breaks t/t $taskN.0...*/
/* only EIP is valid */
- if (vp->valuep == (int *) &ddb_regs.eip) {
- dp = (int *)(&thread->swap_func);
+ if (vp->valuep == (long *) &ddb_regs.eip) {
+ dp = (long *)(&thread->swap_func);
} else {
dp = &null_reg;
}
@@ -170,8 +170,8 @@ db_i386_reg_value(
if (dp == 0) {
if (thread->pcb == 0)
db_error("no pcb\n");
- dp = (int *)((int)(&thread->pcb->iss) +
- ((int)vp->valuep - (int)&ddb_regs));
+ dp = (long *)((long)(&thread->pcb->iss) +
+ ((long)vp->valuep - (long)&ddb_regs));
}
if (flag == DB_VAR_SET)
*dp = *valuep;
@@ -212,18 +212,18 @@ db_numargs(
struct i386_frame *fp,
task_t task)
{
- int *argp;
- int inst;
- int args;
+ long *argp;
+ long inst;
+ long args;
extern char etext[];
- argp = (int *)db_get_task_value((int)&fp->f_retaddr, 4, FALSE, task);
- if (argp < (int *)VM_MIN_KERNEL_ADDRESS || argp > (int *)etext)
+ argp = (long *)db_get_task_value((long)&fp->f_retaddr, sizeof(long), FALSE, task);
+ if (argp < (long *)VM_MIN_KERNEL_ADDRESS || argp > (long *)etext)
args = db_numargs_default;
- else if (!DB_CHECK_ACCESS((int)argp, 4, task))
+ else if (!DB_CHECK_ACCESS((long)argp, sizeof(long), task))
args = db_numargs_default;
else {
- inst = db_get_task_value((int)argp, 4, FALSE, task);
+ inst = db_get_task_value((long)argp, sizeof(long), FALSE, task);
if ((inst & 0xff) == 0x59) /* popl %ecx */
args = 1;
else if ((inst & 0xffff) == 0xc483) /* addl %n, %esp */
@@ -236,16 +236,16 @@ db_numargs(
struct interrupt_frame {
struct i386_frame *if_frame; /* point to next frame */
- int if_retaddr; /* return address to _interrupt */
- int if_unit; /* unit number */
+ long if_retaddr; /* return address to _interrupt */
+ long if_unit; /* unit number */
spl_t if_spl; /* saved spl */
- int if_iretaddr; /* _return_to_{iret,iret_i} */
- int if_edx; /* old sp(iret) or saved edx(iret_i) */
- int if_ecx; /* saved ecx(iret_i) */
- int if_eax; /* saved eax(iret_i) */
- int if_eip; /* saved eip(iret_i) */
- int if_cs; /* saved cs(iret_i) */
- int if_efl; /* saved efl(iret_i) */
+ long if_iretaddr; /* _return_to_{iret,iret_i} */
+ long if_edx; /* old sp(iret) or saved edx(iret_i) */
+ long if_ecx; /* saved ecx(iret_i) */
+ long if_eax; /* saved eax(iret_i) */
+ long if_eip; /* saved eip(iret_i) */
+ long if_cs; /* saved cs(iret_i) */
+ long if_efl; /* saved efl(iret_i) */
};
/*
@@ -263,7 +263,7 @@ db_nextframe(
struct i386_frame **lfp, /* in/out */
struct i386_frame **fp, /* in/out */
db_addr_t *ip, /* out */
- int frame_type, /* in */
+ long frame_type, /* in */
thread_t thread) /* in */
{
struct i386_saved_state *saved_regs;
@@ -277,7 +277,7 @@ db_nextframe(
* it is an (struct i386_saved_state *).
*/
saved_regs = (struct i386_saved_state *)
- db_get_task_value((int)&((*fp)->f_arg0),4,FALSE,task);
+ db_get_task_value((long)&((*fp)->f_arg0),sizeof(long),FALSE,task);
db_printf(">>>>> %s (%d) at ",
trap_name(saved_regs->trapno), saved_regs->trapno);
db_task_printsym(saved_regs->eip, DB_STGY_PROC, task);
@@ -310,10 +310,10 @@ db_nextframe(
default:
miss_frame:
*ip = (db_addr_t)
- db_get_task_value((int)&(*fp)->f_retaddr, 4, FALSE, task);
+ db_get_task_value((long)&(*fp)->f_retaddr, sizeof(long), FALSE, task);
*lfp = *fp;
*fp = (struct i386_frame *)
- db_get_task_value((int)&(*fp)->f_frame, 4, FALSE, task);
+ db_get_task_value((long)&(*fp)->f_frame, sizeof(long), FALSE, task);
break;
}
}
@@ -401,7 +401,7 @@ db_stack_trace_cmd(
} else {
frame = (struct i386_frame *)addr;
th = (db_default_thread)? db_default_thread: current_thread();
- callpc = (db_addr_t)db_get_task_value((int)&frame->f_retaddr, 4,
+ callpc = (db_addr_t)db_get_task_value((long)&frame->f_retaddr, sizeof(long),
FALSE,
(th == THREAD_NULL) ? TASK_NULL : th->task);
}
@@ -420,13 +420,13 @@ db_i386_stack_trace(
{
task_t task;
boolean_t kernel_only;
- int *argp;
- int user_frame = 0;
+ long *argp;
+ long user_frame = 0;
struct i386_frame *lastframe;
int frame_type;
char *filename;
int linenum;
- extern unsigned int db_maxoff;
+ extern unsigned long db_maxoff;
if (count == -1)
count = 65535;
@@ -438,7 +438,7 @@ db_i386_stack_trace(
if (!db_trace_symbols_found)
db_find_trace_symbols();
- if (!INKERNEL((unsigned)callpc) && !INKERNEL((unsigned)frame)) {
+ if (!INKERNEL((unsigned long)callpc) && !INKERNEL((unsigned long)frame)) {
db_printf(">>>>> user space <<<<<\n");
user_frame++;
}
@@ -449,7 +449,7 @@ db_i386_stack_trace(
char * name;
db_expr_t offset;
- if (INKERNEL((unsigned)callpc) && user_frame == 0) {
+ if (INKERNEL((unsigned long)callpc) && user_frame == 0) {
db_addr_t call_func = 0;
db_sym_t sym_tmp;
@@ -474,7 +474,7 @@ db_i386_stack_trace(
frame_type = 0;
narg = db_numargs(frame, task);
}
- } else if (INKERNEL((unsigned)callpc) ^ INKERNEL((unsigned)frame)) {
+ } else if (INKERNEL((unsigned long)callpc) ^ INKERNEL((unsigned long)frame)) {
frame_type = 0;
narg = -1;
} else {
@@ -492,7 +492,7 @@ db_i386_stack_trace(
argp = &frame->f_arg0;
while (narg > 0) {
- db_printf("%x", db_get_task_value((int)argp,4,FALSE,task));
+ db_printf("%x", db_get_task_value((long)argp,sizeof(long),FALSE,task));
argp++;
if (--narg != 0)
db_printf(",");
@@ -519,7 +519,7 @@ db_i386_stack_trace(
break;
}
if (!INKERNEL(lastframe) ||
- (!INKERNEL((unsigned)callpc) && !INKERNEL((unsigned)frame)))
+ (!INKERNEL((unsigned long)callpc) && !INKERNEL((unsigned long)frame)))
user_frame++;
if (user_frame == 1) {
db_printf(">>>>> user space <<<<<\n");
@@ -570,6 +570,7 @@ static void db_cproc_state(
}
/* offsets in a cproc structure */
+/* TODO: longs? */
int db_cproc_next_offset = 0 * 4;
int db_cproc_incarnation_offset = 1 * 4;
int db_cproc_list_offset = 2 * 4;
diff --git a/i386/i386/debug_i386.c b/i386/i386/debug_i386.c
index f3e18835..937d7b4b 100644
--- a/i386/i386/debug_i386.c
+++ b/i386/i386/debug_i386.c
@@ -30,19 +30,21 @@
void dump_ss(struct i386_saved_state *st)
{
printf("Dump of i386_saved_state %p:\n", st);
- printf("EAX %08x EBX %08x ECX %08x EDX %08x\n",
+ printf("EAX %08lx EBX %08lx ECX %08lx EDX %08lx\n",
st->eax, st->ebx, st->ecx, st->edx);
- printf("ESI %08x EDI %08x EBP %08x ESP %08x\n",
+ printf("ESI %08lx EDI %08lx EBP %08lx ESP %08lx\n",
st->esi, st->edi, st->ebp, st->uesp);
- printf("CS %04x SS %04x DS %04x ES %04x FS %04x GS %04x\n",
+ printf("CS %04lx SS %04lx "
+ "DS %04lx ES %04lx "
+ "FS %04lx GS %04lx\n",
st->cs & 0xffff, st->ss & 0xffff,
st->ds & 0xffff, st->es & 0xffff,
st->fs & 0xffff, st->gs & 0xffff);
- printf("v86: DS %04x ES %04x FS %04x GS %04x\n",
+ printf("v86: DS %04lx ES %04lx FS %04lx GS %04lx\n",
st->v86_segs.v86_ds & 0xffff, st->v86_segs.v86_es & 0xffff,
st->v86_segs.v86_gs & 0xffff, st->v86_segs.v86_gs & 0xffff);
- printf("EIP %08x EFLAGS %08x\n", st->eip, st->efl);
- printf("trapno %d: %s, error %08x\n",
+ printf("EIP %08lx EFLAGS %08lx\n", st->eip, st->efl);
+ printf("trapno %ld: %s, error %08lx\n",
st->trapno, trap_name(st->trapno),
st->err);
}
diff --git a/i386/i386/fpu.c b/i386/i386/fpu.c
index 2a4b9c09..8efe9e80 100644
--- a/i386/i386/fpu.c
+++ b/i386/i386/fpu.c
@@ -44,20 +44,19 @@
#include <kern/debug.h>
#include <machine/machspl.h> /* spls */
-#include <kern/mach_param.h>
#include <kern/printf.h>
#include <kern/thread.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <i386/thread.h>
#include <i386/fpu.h>
#include <i386/pio.h>
+#include <i386/pic.h>
#include <i386/locore.h>
#include "cpu_number.h"
#if 0
#include <i386/ipl.h>
-extern int curr_ipl;
#define ASSERT_IPL(L) \
{ \
if (curr_ipl != L) { \
@@ -72,7 +71,7 @@ extern int curr_ipl;
extern void i386_exception();
int fp_kind = FP_387; /* 80387 present */
-zone_t ifps_zone; /* zone for FPU save area */
+struct kmem_cache ifps_cache; /* cache for FPU save area */
static unsigned long mxcsr_feature_mask = 0xffffffff; /* Always AND user-provided mxcsr with this security mask */
void fp_save(thread_t thread);
@@ -110,9 +109,9 @@ init_fpu()
{
unsigned short status, control;
-#ifdef MACH_HYP
+#ifdef MACH_RING1
clear_ts();
-#else /* MACH_HYP */
+#else /* MACH_RING1 */
unsigned int native = 0;
if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486)
@@ -124,7 +123,7 @@ init_fpu()
* the control and status registers.
*/
set_cr0((get_cr0() & ~(CR0_EM|CR0_TS)) | native); /* allow use of FPU */
-#endif /* MACH_HYP */
+#endif /* MACH_RING1 */
fninit();
status = fnstsw();
@@ -158,10 +157,10 @@ init_fpu()
struct i386_xfp_save save;
unsigned long mask;
fp_kind = FP_387X;
-#ifndef MACH_HYP
+#ifndef MACH_RING1
printf("Enabling FXSR\n");
set_cr4(get_cr4() | CR4_OSFXSR);
-#endif /* MACH_HYP */
+#endif /* MACH_RING1 */
fxsave(&save);
mask = save.fp_mxcsr_mask;
if (!mask)
@@ -170,14 +169,14 @@ init_fpu()
} else
fp_kind = FP_387;
}
-#ifdef MACH_HYP
+#ifdef MACH_RING1
set_ts();
-#else /* MACH_HYP */
+#else /* MACH_RING1 */
/*
* Trap wait instructions. Turn off FPU for now.
*/
set_cr0(get_cr0() | CR0_TS | CR0_MP);
-#endif /* MACH_HYP */
+#endif /* MACH_RING1 */
}
else {
/*
@@ -193,10 +192,9 @@ init_fpu()
void
fpu_module_init()
{
- ifps_zone = zinit(sizeof(struct i386_fpsave_state), 16,
- THREAD_MAX * sizeof(struct i386_fpsave_state),
- THREAD_CHUNK * sizeof(struct i386_fpsave_state),
- 0, "i386 fpsave state");
+ kmem_cache_init(&ifps_cache, "i386_fpsave_state",
+ sizeof(struct i386_fpsave_state), 16,
+ NULL, NULL, NULL, 0);
}
/*
@@ -221,7 +219,7 @@ ASSERT_IPL(SPL0);
clear_fpu();
}
#endif /* NCPUS == 1 */
- zfree(ifps_zone, (vm_offset_t) fps);
+ kmem_cache_free(&ifps_cache, (vm_offset_t) fps);
}
/* The two following functions were stolen from Linux's i387.c */
@@ -335,7 +333,7 @@ ASSERT_IPL(SPL0);
simple_unlock(&pcb->lock);
if (ifps != 0) {
- zfree(ifps_zone, (vm_offset_t) ifps);
+ kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
}
}
else {
@@ -356,7 +354,7 @@ ASSERT_IPL(SPL0);
if (ifps == 0) {
if (new_ifps == 0) {
simple_unlock(&pcb->lock);
- new_ifps = (struct i386_fpsave_state *) zalloc(ifps_zone);
+ new_ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
goto Retry;
}
ifps = new_ifps;
@@ -396,7 +394,7 @@ ASSERT_IPL(SPL0);
simple_unlock(&pcb->lock);
if (new_ifps != 0)
- zfree(ifps_zone, (vm_offset_t) new_ifps);
+ kmem_cache_free(&ifps_cache, (vm_offset_t) new_ifps);
}
return KERN_SUCCESS;
@@ -609,7 +607,7 @@ fpextovrflt()
clear_fpu();
if (ifps)
- zfree(ifps_zone, (vm_offset_t) ifps);
+ kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
/*
* Raise exception.
@@ -686,7 +684,7 @@ fpexterrflt()
/*NOTREACHED*/
}
-#ifndef MACH_XEN
+#ifndef MACH_RING1
/*
* FPU error. Called by AST.
*/
@@ -743,7 +741,7 @@ ASSERT_IPL(SPL0);
thread->pcb->ims.ifps->fp_save_state.fp_status);
/*NOTREACHED*/
}
-#endif /* MACH_XEN */
+#endif /* MACH_RING1 */
/*
* Save FPU state.
@@ -785,7 +783,7 @@ fp_load(thread)
ASSERT_IPL(SPL0);
ifps = pcb->ims.ifps;
if (ifps == 0) {
- ifps = (struct i386_fpsave_state *) zalloc(ifps_zone);
+ ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
memset(ifps, 0, sizeof *ifps);
pcb->ims.ifps = ifps;
fpinit();
@@ -836,7 +834,7 @@ fp_state_alloc()
pcb_t pcb = current_thread()->pcb;
struct i386_fpsave_state *ifps;
- ifps = (struct i386_fpsave_state *)zalloc(ifps_zone);
+ ifps = (struct i386_fpsave_state *)kmem_cache_alloc(&ifps_cache);
memset(ifps, 0, sizeof *ifps);
pcb->ims.ifps = ifps;
@@ -865,7 +863,7 @@ fp_state_alloc()
* This comes in on line 5 of the slave PIC at SPL1.
*/
void
-fpintr()
+fpintr(int unit)
{
spl_t s;
thread_t thread = current_thread();
diff --git a/i386/i386/fpu.h b/i386/i386/fpu.h
index 1a1b61f6..21561875 100644
--- a/i386/i386/fpu.h
+++ b/i386/i386/fpu.h
@@ -124,5 +124,6 @@ extern void fpextovrflt(void);
extern void fpexterrflt(void);
extern void fpastintr(void);
extern void init_fpu(void);
+extern void fpintr(int unit);
#endif /* _I386_FPU_H_ */
diff --git a/i386/i386/gdt.c b/i386/i386/gdt.c
index f26a50c9..5523fea3 100644
--- a/i386/i386/gdt.c
+++ b/i386/i386/gdt.c
@@ -39,10 +39,10 @@
#include "seg.h"
#include "gdt.h"
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
/* It is actually defined in xen_boothdr.S */
extern
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
struct real_descriptor gdt[GDTSZ];
void
@@ -57,25 +57,29 @@ gdt_init()
LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
LINEAR_MAX_KERNEL_ADDRESS - (LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) - 1,
ACC_PL_K|ACC_DATA_W, SZ_32);
-#ifndef MACH_HYP
+#ifndef MACH_PV_DESCRIPTORS
fill_gdt_descriptor(LINEAR_DS,
0,
0xffffffff,
ACC_PL_K|ACC_DATA_W, SZ_32);
-#endif /* MACH_HYP */
+#endif /* MACH_PV_DESCRIPTORS */
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
unsigned long frame = kv_to_mfn(gdt);
pmap_set_page_readonly(gdt);
if (hyp_set_gdt(kv_to_la(&frame), GDTSZ))
panic("couldn't set gdt\n");
+#endif
+#ifdef MACH_PV_PAGETABLES
if (hyp_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments))
panic("couldn't set 4gb segments vm assist");
#if 0
if (hyp_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify))
panic("couldn't set 4gb segments vm assist notify");
#endif
-#else /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
+
+#ifndef MACH_PV_DESCRIPTORS
/* Load the new GDT. */
{
struct pseudo_descriptor pdesc;
@@ -84,7 +88,7 @@ gdt_init()
pdesc.linear_base = kvtolin(&gdt);
lgdt(&pdesc);
}
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
/* Reload all the segment registers from the new GDT.
We must load ds and es with 0 before loading them with KERNEL_DS
@@ -101,7 +105,7 @@ gdt_init()
"movw %w1,%%es\n"
"movw %w1,%%ss\n"
: : "i" (KERNEL_CS), "r" (KERNEL_DS), "r" (0));
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
#if VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
/* things now get shifted */
#ifdef MACH_PSEUDO_PHYS
@@ -109,6 +113,6 @@ gdt_init()
#endif /* MACH_PSEUDO_PHYS */
la_shift += LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
#endif
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
}
diff --git a/i386/i386/gdt.h b/i386/i386/gdt.h
index 41ace791..d865640b 100644
--- a/i386/i386/gdt.h
+++ b/i386/i386/gdt.h
@@ -40,16 +40,16 @@
*/
#define KERNEL_CS (0x08 | KERNEL_RING) /* kernel code */
#define KERNEL_DS (0x10 | KERNEL_RING) /* kernel data */
-#ifndef MACH_XEN
+#ifndef MACH_PV_DESCRIPTORS
#define KERNEL_LDT 0x18 /* master LDT */
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
#define KERNEL_TSS 0x20 /* master TSS (uniprocessor) */
#define USER_LDT 0x28 /* place for per-thread LDT */
#define USER_TSS 0x30 /* place for per-thread TSS
that holds IO bitmap */
-#ifndef MACH_HYP
+#ifndef MACH_PV_DESCRIPTORS
#define LINEAR_DS 0x38 /* linear mapping */
-#endif /* MACH_HYP */
+#endif /* MACH_PV_DESCRIPTORS */
/* 0x40 was USER_FPREGS, now free */
#define USER_GDT 0x48 /* user-defined GDT entries */
diff --git a/i386/i386/hardclock.c b/i386/i386/hardclock.c
index a8846b95..66150332 100644
--- a/i386/i386/hardclock.c
+++ b/i386/i386/hardclock.c
@@ -39,13 +39,17 @@
#include <i386/ipl.h>
#endif
-extern void clock_interrupt();
+#ifdef LINUX_DEV
+#include <linux/dev/glue/glue.h>
+#endif
+
extern char return_to_iret[];
void
-hardclock(iunit, old_ipl, ret_addr, regs)
+hardclock(iunit, old_ipl, irq, ret_addr, regs)
int iunit; /* 'unit' number */
int old_ipl; /* old interrupt level */
+ int irq; /* irq number */
char * ret_addr; /* return address in interrupt handler */
struct i386_interrupt_state *regs;
/* saved registers */
diff --git a/i386/i386/i386asm.sym b/i386/i386/i386asm.sym
index b1670e8b..dd1a2edf 100644
--- a/i386/i386/i386asm.sym
+++ b/i386/i386/i386asm.sym
@@ -78,6 +78,7 @@ offset i386_saved_state r err
offset i386_saved_state r efl R_EFLAGS
offset i386_saved_state r eip
offset i386_saved_state r cr2
+offset i386_saved_state r edi
offset i386_interrupt_state i eip
offset i386_interrupt_state i cs
@@ -86,12 +87,14 @@ offset i386_interrupt_state i efl
offset i386_tss tss esp0
offset i386_tss tss ss0
+offset machine_slot sub_type cpu_type
+
expr I386_PGBYTES NBPG
expr VM_MIN_ADDRESS
expr VM_MAX_ADDRESS
expr VM_MIN_KERNEL_ADDRESS KERNELBASE
expr KERNEL_STACK_SIZE
-#if VM_MIN_KERNEL_ADDRESS == LINEAR_MIN_KERNEL_ADDRESS
+#if defined MACH_PSEUDO_PHYS && (VM_MIN_KERNEL_ADDRESS == LINEAR_MIN_KERNEL_ADDRESS)
expr PFN_LIST pfn_list
#endif
@@ -121,9 +124,9 @@ expr KERNEL_RING
expr KERNEL_CS
expr KERNEL_DS
expr KERNEL_TSS
-#ifndef MACH_XEN
+#ifndef MACH_PV_DESCRIPTORS
expr KERNEL_LDT
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
expr (VM_MIN_KERNEL_ADDRESS>>PDESHIFT)*sizeof(pt_entry_t) KERNELBASEPDE
@@ -148,5 +151,7 @@ offset shared_info si vcpu_info[0].evtchn_upcall_pending CPU_PENDING
offset shared_info si vcpu_info[0].evtchn_pending_sel CPU_PENDING_SEL
offset shared_info si evtchn_pending PENDING
offset shared_info si evtchn_mask EVTMASK
+#ifdef MACH_PV_PAGETABLES
offset shared_info si vcpu_info[0].arch.cr2 CR2
+#endif /* MACH_PV_PAGETABLES */
#endif /* MACH_XEN */
diff --git a/i386/i386/idt.c b/i386/i386/idt.c
index b5e3d080..882764f4 100644
--- a/i386/i386/idt.c
+++ b/i386/i386/idt.c
@@ -38,10 +38,10 @@ extern struct idt_init_entry idt_inittab[];
void idt_init()
{
-#ifdef MACH_HYP
+#ifdef MACH_PV_DESCRIPTORS
if (hyp_set_trap_table(kvtolin(idt_inittab)))
panic("couldn't set trap table\n");
-#else /* MACH_HYP */
+#else /* MACH_PV_DESCRIPTORS */
struct idt_init_entry *iie = idt_inittab;
/* Initialize the exception vectors from the idt_inittab. */
@@ -59,6 +59,6 @@ void idt_init()
pdesc.linear_base = kvtolin(&idt);
lidt(&pdesc);
}
-#endif /* MACH_HYP */
+#endif /* MACH_PV_DESCRIPTORS */
}
diff --git a/i386/i386/idt_inittab.S b/i386/i386/idt_inittab.S
index 63e554bd..8e92d805 100644
--- a/i386/i386/idt_inittab.S
+++ b/i386/i386/idt_inittab.S
@@ -39,7 +39,7 @@ ENTRY(idt_inittab)
/*
* Interrupt descriptor table and code vectors for it.
*/
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
#define IDT_ENTRY(n,entry,type) \
.data 2 ;\
.byte n ;\
@@ -47,14 +47,14 @@ ENTRY(idt_inittab)
.word KERNEL_CS ;\
.long entry ;\
.text
-#else /* MACH_XEN */
+#else /* MACH_PV_DESCRIPTORS */
#define IDT_ENTRY(n,entry,type) \
.data 2 ;\
.long entry ;\
.word n ;\
.word type ;\
.text
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
/*
* No error code. Clear error code and push trap number.
@@ -108,7 +108,7 @@ EXCEP_SPC(0x0b,t_segnp)
EXCEP_ERR(0x0c,t_stack_fault)
EXCEP_SPC(0x0d,t_gen_prot)
EXCEP_SPC(0x0e,t_page_fault)
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
EXCEP_ERR(0x0f,t_trap_0f)
#else
EXCEPTION(0x0f,t_trap_0f)
@@ -133,7 +133,7 @@ EXCEPTION(0x1f,t_trap_1f)
/* Terminator */
.data 2
.long 0
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
.long 0
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
diff --git a/i386/i386/io_map.c b/i386/i386/io_map.c
index 5b77552d..b095f224 100644
--- a/i386/i386/io_map.c
+++ b/i386/i386/io_map.c
@@ -49,7 +49,7 @@ io_map(phys_addr, size)
*/
start = kernel_virtual_start;
kernel_virtual_start += round_page(size);
- printf("stealing kernel virtual addresses %08x-%08x\n", start, kernel_virtual_start);
+ printf("stealing kernel virtual addresses %08lx-%08lx\n", start, kernel_virtual_start);
}
else {
(void) kmem_alloc_pageable(kernel_map, &start, round_page(size));
diff --git a/i386/i386/io_perm.c b/i386/i386/io_perm.c
index df25cc63..8bacb8d5 100644
--- a/i386/i386/io_perm.c
+++ b/i386/i386/io_perm.c
@@ -54,7 +54,8 @@
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
+#include <kern/kalloc.h>
#include <kern/lock.h>
#include <kern/queue.h>
#include <kern/thread.h>
@@ -257,12 +258,12 @@ i386_io_perm_modify (task_t target_task, io_perm_t io_perm, boolean_t enable)
if (!iopb)
{
simple_unlock (&target_task->machine.iopb_lock);
- iopb = (unsigned char *) zalloc (machine_task_iopb_zone);
+ iopb = (unsigned char *) kmem_cache_alloc (&machine_task_iopb_cache);
simple_lock (&target_task->machine.iopb_lock);
if (target_task->machine.iopb)
{
if (iopb)
- zfree (machine_task_iopb_zone, (vm_offset_t) iopb);
+ kmem_cache_free (&machine_task_iopb_cache, (vm_offset_t) iopb);
iopb = target_task->machine.iopb;
iopb_size = target_task->machine.iopb_size;
}
diff --git a/i386/i386/ipl.h b/i386/i386/ipl.h
index 557cd8df..8f729e1d 100644
--- a/i386/i386/ipl.h
+++ b/i386/i386/ipl.h
@@ -73,5 +73,6 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
extern void (*ivect[])();
extern int iunit[];
extern int intpri[];
+extern spl_t curr_ipl;
#endif /* __ASSEMBLER__ */
#endif /* KERNEL */
diff --git a/i386/i386/ktss.c b/i386/i386/ktss.c
index 66432f3e..1b2938a0 100644
--- a/i386/i386/ktss.c
+++ b/i386/i386/ktss.c
@@ -45,12 +45,12 @@ ktss_init()
/* XXX temporary exception stack */
static int exception_stack[1024];
-#ifdef MACH_XEN
+#ifdef MACH_RING1
/* Xen won't allow us to do any I/O by default anyway, just register
* exception stack */
- if (hyp_stack_switch(KERNEL_DS, (unsigned)(exception_stack+1024)))
+ if (hyp_stack_switch(KERNEL_DS, (unsigned long)(exception_stack+1024)))
panic("couldn't register exception stack\n");
-#else /* MACH_XEN */
+#else /* MACH_RING1 */
/* Initialize the master TSS descriptor. */
fill_gdt_descriptor(KERNEL_TSS,
kvtolin(&ktss), sizeof(struct task_tss) - 1,
@@ -58,13 +58,13 @@ ktss_init()
/* Initialize the master TSS. */
ktss.tss.ss0 = KERNEL_DS;
- ktss.tss.esp0 = (unsigned)(exception_stack+1024);
+ ktss.tss.esp0 = (unsigned long)(exception_stack+1024);
ktss.tss.io_bit_map_offset = IOPB_INVAL;
/* Set the last byte in the I/O bitmap to all 1's. */
ktss.barrier = 0xff;
/* Load the TSS. */
ltr(KERNEL_TSS);
-#endif /* MACH_XEN */
+#endif /* MACH_RING1 */
}
diff --git a/i386/i386/ldt.c b/i386/i386/ldt.c
index 0ef7a8c4..43b9efb5 100644
--- a/i386/i386/ldt.c
+++ b/i386/i386/ldt.c
@@ -39,23 +39,25 @@
extern int syscall();
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
/* It is actually defined in xen_boothdr.S */
extern
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
struct real_descriptor ldt[LDTSZ];
void
ldt_init()
{
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readwrite(ldt);
-#else /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
+#else /* MACH_PV_DESCRIPTORS */
/* Initialize the master LDT descriptor in the GDT. */
fill_gdt_descriptor(KERNEL_LDT,
kvtolin(&ldt), sizeof(ldt)-1,
ACC_PL_K|ACC_LDT, 0);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
/* Initialize the LDT descriptors. */
fill_ldt_gate(USER_SCALL,
@@ -72,9 +74,9 @@ ldt_init()
ACC_PL_U|ACC_DATA_W, SZ_32);
/* Activate the LDT. */
-#ifdef MACH_HYP
+#ifdef MACH_PV_DESCRIPTORS
hyp_set_ldt(&ldt, LDTSZ);
-#else /* MACH_HYP */
+#else /* MACH_PV_DESCRIPTORS */
lldt(KERNEL_LDT);
-#endif /* MACH_HYP */
+#endif /* MACH_PV_DESCRIPTORS */
}
diff --git a/i386/i386/locore.S b/i386/i386/locore.S
index 11214e25..eeef09d7 100644
--- a/i386/i386/locore.S
+++ b/i386/i386/locore.S
@@ -442,12 +442,12 @@ ENTRY(t_debug)
ENTRY(t_page_fault)
pushl $(T_PAGE_FAULT) /* mark a page fault trap */
pusha /* save the general registers */
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
movl %ss:hyp_shared_info+CR2,%eax
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
movl %cr2,%eax /* get the faulting address */
-#endif /* MACH_XEN */
- movl %eax,12(%esp) /* save in esp save slot */
+#endif /* MACH_PV_PAGETABLES */
+ movl %eax,R_CR2-R_EDI(%esp) /* save in esp save slot */
jmp trap_push_segs /* continue fault */
/*
@@ -785,6 +785,7 @@ ast_from_interrupt:
* #ifndef MACH_XEN
* iunit
* saved SPL
+ * irq
* #endif
* return address == return_to_iret_i
* saved %edx
@@ -800,6 +801,7 @@ ast_from_interrupt:
* #ifndef MACH_XEN
* iunit
* saved SPL
+ * irq
* #endif
* return address == return_to_iret
* pointer to save area on old stack
@@ -1299,48 +1301,51 @@ Entry(copyoutmsg)
movl $USER_DS,%eax /* use user data segment for accesses */
mov %ax,%es
+#if !defined(MACH_HYP) && !PAE
+ cmpl $3,machine_slot+SUB_TYPE_CPU_TYPE
+ jbe copyout_retry /* Use slow version on i386 */
+#endif /* !defined(MACH_HYP) && !PAE */
+
+ movl %edx,%eax /* use count */
+ /*cld*/ /* count up: always this way in GCC code */
+ movl %eax,%ecx /* move by longwords first */
+ shrl $2,%ecx
+ RECOVER(copyout_fail)
+ rep
+ movsl
+ movl %eax,%ecx /* now move remaining bytes */
+ andl $3,%ecx
+ RECOVER(copyout_fail)
+ rep
+ movsb /* move */
+ xorl %eax,%eax /* return 0 for success */
+
+copyout_ret:
+ mov %ss,%di /* restore ES to kernel segment */
+ mov %di,%es
+
+ popl %edi /* restore registers */
+ popl %esi
+ ret /* and return */
+
+copyout_fail:
+ movl $1,%eax /* return 1 for failure */
+ jmp copyout_ret /* pop frame and return */
+
+#if !defined(MACH_HYP) && !PAE
/*
* Check whether user address space is writable
- * before writing to it - hardware is broken.
- * XXX only have to do this on 386's.
+ * before writing to it - i386 hardware is broken.
*/
copyout_retry:
-#ifdef MACH_HYP
- movl cr3,%ecx /* point to page directory */
-#else /* MACH_HYP */
movl %cr3,%ecx /* point to page directory */
-#endif /* MACH_HYP */
-#if PAE
- movl %edi,%eax /* get page directory pointer bits */
- shrl $(PDPSHIFT),%eax /* from user address */
- movl KERNELBASE(%ecx,%eax,PTE_SIZE),%ecx
- /* get page directory pointer */
-#ifdef MACH_PSEUDO_PHYS
- shrl $(PTESHIFT),%ecx
- movl pfn_list,%eax
- movl (%eax,%ecx,4),%ecx /* mfn_to_pfn */
- shll $(PTESHIFT),%ecx
-#else /* MACH_PSEUDO_PHYS */
- andl $(PTE_PFN),%ecx /* isolate page frame address */
-#endif /* MACH_PSEUDO_PHYS */
-#endif /* PAE */
movl %edi,%eax /* get page directory bits */
shrl $(PDESHIFT),%eax /* from user address */
-#if PAE
- andl $(PDEMASK),%eax
-#endif /* PAE */
movl KERNELBASE(%ecx,%eax,PTE_SIZE),%ecx
/* get page directory pointer */
testl $(PTE_V),%ecx /* present? */
jz 0f /* if not, fault is OK */
-#ifdef MACH_PSEUDO_PHYS
- shrl $(PTESHIFT),%ecx
- movl pfn_list,%eax
- movl (%eax,%ecx,4),%ecx /* mfn_to_pfn */
- shll $(PTESHIFT),%ecx
-#else /* MACH_PSEUDO_PHYS */
andl $(PTE_PFN),%ecx /* isolate page frame address */
-#endif /* MACH_PSEUDO_PHYS */
movl %edi,%eax /* get page table bits */
shrl $(PTESHIFT),%eax
andl $(PTEMASK),%eax /* from user address */
@@ -1354,17 +1359,9 @@ copyout_retry:
/*
* Not writable - must fake a fault. Turn off access to the page.
*/
-#ifdef MACH_HYP
- pushl %edx
- pushl %ecx
- call hyp_invalidate_pte
- popl %ecx
- popl %edx
-#else /* MACH_HYP */
andl $(PTE_INVALID),(%ecx) /* turn off valid bit */
movl %cr3,%eax /* invalidate TLB */
movl %eax,%cr3
-#endif /* MACH_HYP */
0:
/*
@@ -1395,18 +1392,8 @@ copyout_retry:
subl %eax,%edx /* decrement count */
jg copyout_retry /* restart on next page if not done */
xorl %eax,%eax /* return 0 for success */
-
-copyout_ret:
- mov %ss,%di /* restore ES to kernel segment */
- mov %di,%es
-
- popl %edi /* restore registers */
- popl %esi
- ret /* and return */
-
-copyout_fail:
- movl $1,%eax /* return 1 for failure */
- jmp copyout_ret /* pop frame and return */
+ jmp copyout_ret
+#endif /* !defined(MACH_HYP) && !PAE */
/*
* int inst_fetch(int eip, int cs);
@@ -1436,14 +1423,14 @@ _inst_fetch_fault:
ENTRY(dr6)
-#ifdef MACH_XEN
+#ifdef MACH_RING1
pushl %ebx
movl $6, %ebx
call __hyp_get_debugreg
popl %ebx
-#else /* MACH_XEN */
+#else /* MACH_RING1 */
movl %db6, %eax
-#endif /* MACH_XEN */
+#endif /* MACH_RING1 */
ret
/* dr<i>(address, type, len, persistence)
@@ -1451,67 +1438,67 @@ ENTRY(dr6)
ENTRY(dr0)
movl S_ARG0, %eax
movl %eax,EXT(dr_addr)
-#ifdef MACH_XEN
+#ifdef MACH_RING1
pushl %ebx
movl $0,%ebx
movl %eax,%ecx
call __hyp_set_debugreg
-#else /* MACH_XEN */
+#else /* MACH_RING1 */
movl %eax, %db0
-#endif /* MACH_XEN */
+#endif /* MACH_RING1 */
movl $0, %ecx
jmp 0f
ENTRY(dr1)
movl S_ARG0, %eax
movl %eax,EXT(dr_addr)+1*4
-#ifdef MACH_XEN
+#ifdef MACH_RING1
pushl %ebx
movl $1,%ebx
movl %eax,%ecx
call __hyp_set_debugreg
-#else /* MACH_XEN */
+#else /* MACH_RING1 */
movl %eax, %db1
-#endif /* MACH_XEN */
+#endif /* MACH_RING1 */
movl $2, %ecx
jmp 0f
ENTRY(dr2)
movl S_ARG0, %eax
movl %eax,EXT(dr_addr)+2*4
-#ifdef MACH_XEN
+#ifdef MACH_RING1
pushl %ebx
movl $2,%ebx
movl %eax,%ecx
call __hyp_set_debugreg
-#else /* MACH_XEN */
+#else /* MACH_RING1 */
movl %eax, %db2
-#endif /* MACH_XEN */
+#endif /* MACH_RING1 */
movl $4, %ecx
jmp 0f
ENTRY(dr3)
movl S_ARG0, %eax
movl %eax,EXT(dr_addr)+3*4
-#ifdef MACH_XEN
+#ifdef MACH_RING1
pushl %ebx
movl $3,%ebx
movl %eax,%ecx
call __hyp_set_debugreg
-#else /* MACH_XEN */
+#else /* MACH_RING1 */
movl %eax, %db3
-#endif /* MACH_XEN */
+#endif /* MACH_RING1 */
movl $6, %ecx
0:
pushl %ebp
movl %esp, %ebp
-#ifdef MACH_XEN
+#ifdef MACH_RING1
movl $7,%ebx
call __hyp_get_debugreg
movl %eax, %edx
-#else /* MACH_XEN */
+#else /* MACH_RING1 */
movl %db7, %edx
-#endif /* MACH_XEN */
+#endif /* MACH_RING1 */
movl %edx,EXT(dr_addr)+4*4
andl dr_msk(,%ecx,2),%edx /* clear out new entry */
movl %edx,EXT(dr_addr)+5*4
@@ -1533,14 +1520,14 @@ ENTRY(dr3)
shll %cl, %eax
orl %eax, %edx
-#ifdef MACH_XEN
+#ifdef MACH_RING1
movl $7,%ebx
movl %edx, %ecx
call __hyp_set_debugreg
popl %ebx
-#else /* MACH_XEN */
+#else /* MACH_RING1 */
movl %edx, %db7
-#endif /* MACH_XEN */
+#endif /* MACH_RING1 */
movl %edx,EXT(dr_addr)+7*4
movl %edx, %eax
leave
diff --git a/i386/i386/locore.h b/i386/i386/locore.h
index 57014304..bfd13177 100644
--- a/i386/i386/locore.h
+++ b/i386/i386/locore.h
@@ -1,6 +1,5 @@
/*
- * Header file for printf type functions.
- * Copyright (C) 2006 Free Software Foundation.
+ * Copyright (C) 2006, 2011 Free Software Foundation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,16 +23,39 @@
#include <kern/sched_prim.h>
-extern int copyin (const void *userbuf, void *kernelbuf, size_t cn);
+/*
+ * Fault recovery in copyin/copyout routines.
+ */
+struct recovery {
+ int fault_addr;
+ int recover_addr;
+};
-extern int copyinmsg (const void *userbuf, void *kernelbuf, size_t cn);
+extern struct recovery recover_table[];
+extern struct recovery recover_table_end[];
-extern int copyout (const void *kernelbuf, void *userbuf, size_t cn);
+/*
+ * Recovery from Successful fault in copyout does not
+ * return directly - it retries the pte check, since
+ * the 386 ignores write protection in kernel mode.
+ */
+extern struct recovery retry_table[];
+extern struct recovery retry_table_end[];
-extern int copyoutmsg (const void *kernelbuf, void *userbuf, size_t cn);
extern int call_continuation (continuation_t continuation);
+extern int discover_x86_cpu_type (void);
+
+extern int copyin (const void *userbuf, void *kernelbuf, size_t cn);
+extern int copyinmsg (const void *userbuf, void *kernelbuf, size_t cn);
+extern int copyout (const void *kernelbuf, void *userbuf, size_t cn);
+extern int copyoutmsg (const void *kernelbuf, void *userbuf, size_t cn);
+
+extern int inst_fetch (int eip, int cs);
+
+extern void cpu_shutdown (void);
+
extern unsigned int cpu_features[1];
#define CPU_FEATURE_FPU 0
diff --git a/i386/i386/machine_task.c b/i386/i386/machine_task.c
index 35b89e02..62b22e3a 100644
--- a/i386/i386/machine_task.c
+++ b/i386/i386/machine_task.c
@@ -22,15 +22,14 @@
#include <kern/lock.h>
#include <mach/mach_types.h>
-#include <kern/zalloc.h>
-#include <kern/mach_param.h>
+#include <kern/slab.h>
#include <machine/task.h>
#include <machine/io_perm.h>
-/* The zone which holds our IO permission bitmaps. */
-zone_t machine_task_iopb_zone;
+/* The cache which holds our IO permission bitmaps. */
+struct kmem_cache machine_task_iopb_cache;
/* Initialize the machine task module. The function is called once at
@@ -38,11 +37,8 @@ zone_t machine_task_iopb_zone;
void
machine_task_module_init (void)
{
- machine_task_iopb_zone = zinit (IOPB_BYTES, 0,
- TASK_MAX * IOPB_BYTES,
- IOPB_BYTES,
- ZONE_COLLECTABLE | ZONE_EXHAUSTIBLE,
- "i386 machine task iopb");
+ kmem_cache_init (&machine_task_iopb_cache, "i386_task_iopb", IOPB_BYTES, 0,
+ NULL, NULL, NULL, 0);
}
@@ -62,7 +58,8 @@ void
machine_task_terminate (task_t task)
{
if (task->machine.iopb)
- zfree (machine_task_iopb_zone, (vm_offset_t) task->machine.iopb);
+ kmem_cache_free (&machine_task_iopb_cache,
+ (vm_offset_t) task->machine.iopb);
}
@@ -74,7 +71,8 @@ machine_task_collect (task_t task)
simple_lock (&task->machine.iopb_lock);
if (task->machine.iopb_size == 0 && task->machine.iopb)
{
- zfree (machine_task_iopb_zone, (vm_offset_t) task->machine.iopb);
+ kmem_cache_free (&machine_task_iopb_cache,
+ (vm_offset_t) task->machine.iopb);
task->machine.iopb = 0;
}
simple_unlock (&task->machine.iopb_lock);
diff --git a/i386/i386/mp_desc.c b/i386/i386/mp_desc.c
index 2fd5ec2d..95f55af2 100644
--- a/i386/i386/mp_desc.c
+++ b/i386/i386/mp_desc.c
@@ -150,9 +150,9 @@ mp_desc_init(mycpu)
* Fix up the entries in the GDT to point to
* this LDT and this TSS.
*/
-#ifdef MACH_HYP
+#ifdef MACH_RING1
panic("TODO %s:%d\n",__FILE__,__LINE__);
-#else /* MACH_HYP */
+#else /* MACH_RING1 */
fill_descriptor(&mpt->gdt[sel_idx(KERNEL_LDT)],
(unsigned)&mpt->ldt,
LDTSZ * sizeof(struct real_descriptor) - 1,
@@ -165,7 +165,7 @@ mp_desc_init(mycpu)
mpt->ktss.tss.ss0 = KERNEL_DS;
mpt->ktss.tss.io_bit_map_offset = IOPB_INVAL;
mpt->ktss.barrier = 0xFF;
-#endif /* MACH_HYP */
+#endif /* MACH_RING1 */
return mpt;
}
diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c
index f687db14..7d632a6b 100644
--- a/i386/i386/pcb.c
+++ b/i386/i386/pcb.c
@@ -36,9 +36,9 @@
#include "vm_param.h"
#include <kern/counters.h>
#include <kern/debug.h>
-#include <kern/mach_param.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
+#include <kern/slab.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
@@ -65,7 +65,7 @@ extern void Thread_continue();
extern void user_ldt_free();
-zone_t pcb_zone;
+struct kmem_cache pcb_cache;
vm_offset_t kernel_stack[NCPUS]; /* top of active_stack */
@@ -78,7 +78,7 @@ vm_offset_t kernel_stack[NCPUS]; /* top of active_stack */
void stack_attach(thread, stack, continuation)
register thread_t thread;
register vm_offset_t stack;
- void (*continuation)();
+ void (*continuation)(thread_t);
{
counter(if (++c_stacks_current > c_stacks_max)
c_stacks_max = c_stacks_current);
@@ -93,9 +93,10 @@ void stack_attach(thread, stack, continuation)
* This function will not return normally,
* so we don`t have to worry about a return address.
*/
- STACK_IKS(stack)->k_eip = (int) Thread_continue;
- STACK_IKS(stack)->k_ebx = (int) continuation;
- STACK_IKS(stack)->k_esp = (int) STACK_IEL(stack);
+ STACK_IKS(stack)->k_eip = (long) Thread_continue;
+ STACK_IKS(stack)->k_ebx = (long) continuation;
+ STACK_IKS(stack)->k_esp = (long) STACK_IEL(stack);
+ STACK_IKS(stack)->k_ebp = (long) 0;
/*
* Point top of kernel stack to user`s registers.
@@ -151,15 +152,16 @@ void switch_ktss(pcb)
*/
pcb_stack_top = (pcb->iss.efl & EFL_VM)
- ? (int) (&pcb->iss + 1)
- : (int) (&pcb->iss.v86_segs);
+ ? (long) (&pcb->iss + 1)
+ : (long) (&pcb->iss.v86_segs);
-#ifdef MACH_XEN
+#ifdef MACH_RING1
/* No IO mask here */
- hyp_stack_switch(KERNEL_DS, pcb_stack_top);
-#else /* MACH_XEN */
+ if (hyp_stack_switch(KERNEL_DS, pcb_stack_top))
+ panic("stack_switch");
+#else /* MACH_RING1 */
curr_ktss(mycpu)->tss.esp0 = pcb_stack_top;
-#endif /* MACH_XEN */
+#endif /* MACH_RING1 */
}
{
@@ -171,28 +173,28 @@ void switch_ktss(pcb)
/*
* Use system LDT.
*/
-#ifdef MACH_HYP
+#ifdef MACH_PV_DESCRIPTORS
hyp_set_ldt(&ldt, LDTSZ);
-#else /* MACH_HYP */
+#else /* MACH_PV_DESCRIPTORS */
set_ldt(KERNEL_LDT);
-#endif /* MACH_HYP */
+#endif /* MACH_PV_DESCRIPTORS */
}
else {
/*
* Thread has its own LDT.
*/
-#ifdef MACH_HYP
+#ifdef MACH_PV_DESCRIPTORS
hyp_set_ldt(tldt->ldt,
(tldt->desc.limit_low|(tldt->desc.limit_high<<16)) /
sizeof(struct real_descriptor));
-#else /* MACH_HYP */
+#else /* MACH_PV_DESCRIPTORS */
*gdt_desc_p(mycpu,USER_LDT) = tldt->desc;
set_ldt(USER_LDT);
-#endif /* MACH_HYP */
+#endif /* MACH_PV_DESCRIPTORS */
}
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
{
int i;
for (i=0; i < USER_GDT_SLOTS; i++) {
@@ -204,14 +206,14 @@ void switch_ktss(pcb)
}
}
}
-#else /* MACH_XEN */
+#else /* MACH_PV_DESCRIPTORS */
/* Copy in the per-thread GDT slots. No reloading is necessary
because just restoring the segment registers on the way back to
user mode reloads the shadow registers from the in-memory GDT. */
memcpy (gdt_desc_p (mycpu, USER_GDT),
pcb->ims.user_gdt, sizeof pcb->ims.user_gdt);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
/*
* Load the floating-point context, if necessary.
@@ -368,10 +370,8 @@ thread_t switch_context(old, continuation, new)
void pcb_module_init()
{
- pcb_zone = zinit(sizeof(struct pcb), 0,
- THREAD_MAX * sizeof(struct pcb),
- THREAD_CHUNK * sizeof(struct pcb),
- 0, "i386 pcb state");
+ kmem_cache_init(&pcb_cache, "pcb", sizeof(struct pcb), 0,
+ NULL, NULL, NULL, 0);
fpu_module_init();
}
@@ -381,7 +381,7 @@ void pcb_init(thread)
{
register pcb_t pcb;
- pcb = (pcb_t) zalloc(pcb_zone);
+ pcb = (pcb_t) kmem_cache_alloc(&pcb_cache);
if (pcb == 0)
panic("pcb_init");
@@ -421,7 +421,7 @@ void pcb_terminate(thread)
fp_free(pcb->ims.ifps);
if (pcb->ims.ldt != 0)
user_ldt_free(pcb->ims.ldt);
- zfree(pcb_zone, (vm_offset_t) pcb);
+ kmem_cache_free(&pcb_cache, (vm_offset_t) pcb);
thread->pcb = 0;
}
@@ -811,7 +811,7 @@ set_user_regs(stack_base, stack_size, exec_info, arg_size)
arg_addr = stack_base + stack_size - arg_size;
saved_state = USER_REGS(current_thread());
- saved_state->uesp = (int)arg_addr;
+ saved_state->uesp = (long)arg_addr;
saved_state->eip = exec_info->entry;
return (arg_addr);
diff --git a/i386/i386/pcb.h b/i386/i386/pcb.h
index f8671a2e..21bdfd9c 100644
--- a/i386/i386/pcb.h
+++ b/i386/i386/pcb.h
@@ -64,7 +64,7 @@ extern void load_context (thread_t new);
extern void stack_attach (
thread_t thread,
vm_offset_t stack,
- void (*continuation)());
+ void (*continuation)(thread_t));
extern vm_offset_t stack_detach (thread_t thread);
diff --git a/i386/i386/phys.c b/i386/i386/phys.c
index 925593b3..ed4a309a 100644
--- a/i386/i386/phys.c
+++ b/i386/i386/phys.c
@@ -105,9 +105,5 @@ vm_offset_t addr;
if ((pte = pmap_pte(kernel_pmap, addr)) == PT_ENTRY_NULL)
return 0;
- return i386_trunc_page(
-#ifdef MACH_PSEUDO_PHYS
- ma_to_pa
-#endif /* MACH_PSEUDO_PHYS */
- (*pte)) | (addr & INTEL_OFFMASK);
+ return pte_to_pa(*pte) | (addr & INTEL_OFFMASK);
}
diff --git a/i386/i386/pic.h b/i386/i386/pic.h
index 7a177d86..52f6ec16 100644
--- a/i386/i386/pic.h
+++ b/i386/i386/pic.h
@@ -179,6 +179,10 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#ifndef __ASSEMBLER__
extern void form_pic_mask (void);
extern void picinit (void);
+extern int curr_pic_mask;
+extern int pic_mask[];
+extern void prtnull(int unit);
+extern void intnull(int unit);
#endif
#endif /* _I386_PIC_H_ */
diff --git a/i386/i386/proc_reg.h b/i386/i386/proc_reg.h
index 64d8c43f..f1b2c89f 100644
--- a/i386/i386/proc_reg.h
+++ b/i386/i386/proc_reg.h
@@ -77,54 +77,54 @@
#include <i386/ldt.h>
#endif /* MACH_HYP */
-static inline unsigned
+static inline unsigned long
get_eflags(void)
{
- unsigned eflags;
- asm("pushfd; popl %0" : "=r" (eflags));
+ unsigned long eflags;
+ asm("pushfd; pop %0" : "=r" (eflags));
return eflags;
}
static inline void
-set_eflags(unsigned eflags)
+set_eflags(unsigned long eflags)
{
- asm volatile("pushl %0; popfd" : : "r" (eflags));
+ asm volatile("push %0; popfd" : : "r" (eflags));
}
#define get_esp() \
({ \
- register unsigned int _temp__ asm("esp"); \
+ register unsigned long _temp__ asm("esp"); \
_temp__; \
})
#define get_eflags() \
({ \
- register unsigned int _temp__; \
- asm("pushf; popl %0" : "=r" (_temp__)); \
+ register unsigned long _temp__; \
+ asm("pushf; pop %0" : "=r" (_temp__)); \
_temp__; \
})
#define get_cr0() \
({ \
- register unsigned int _temp__; \
+ register unsigned long _temp__; \
asm volatile("mov %%cr0, %0" : "=r" (_temp__)); \
_temp__; \
})
#define set_cr0(value) \
({ \
- register unsigned int _temp__ = (value); \
+ register unsigned long _temp__ = (value); \
asm volatile("mov %0, %%cr0" : : "r" (_temp__)); \
})
#define get_cr2() \
({ \
- register unsigned int _temp__; \
+ register unsigned long _temp__; \
asm volatile("mov %%cr2, %0" : "=r" (_temp__)); \
_temp__; \
})
-#ifdef MACH_HYP
+#ifdef MACH_PV_PAGETABLES
extern unsigned long cr3;
#define get_cr3() (cr3)
#define set_cr3(value) \
@@ -133,24 +133,24 @@ extern unsigned long cr3;
if (!hyp_set_cr3(value)) \
panic("set_cr3"); \
})
-#else /* MACH_HYP */
+#else /* MACH_PV_PAGETABLES */
#define get_cr3() \
({ \
- register unsigned int _temp__; \
+ register unsigned long _temp__; \
asm volatile("mov %%cr3, %0" : "=r" (_temp__)); \
_temp__; \
})
#define set_cr3(value) \
({ \
- register unsigned int _temp__ = (value); \
- asm volatile("mov %0, %%cr3" : : "r" (_temp__)); \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0, %%cr3" : : "r" (_temp__) : "memory"); \
})
-#endif /* MACH_HYP */
+#endif /* MACH_PV_PAGETABLES */
#define flush_tlb() set_cr3(get_cr3())
-#ifndef MACH_HYP
+#ifndef MACH_PV_PAGETABLES
#define invlpg(addr) \
({ \
asm volatile("invlpg (%0)" : : "r" (addr)); \
@@ -178,34 +178,34 @@ extern unsigned long cr3;
: "+r" (var) : "r" (end), \
"q" (LINEAR_DS), "q" (KERNEL_DS), "i" (PAGE_SIZE)); \
})
-#endif /* MACH_HYP */
+#endif /* MACH_PV_PAGETABLES */
#define get_cr4() \
({ \
- register unsigned int _temp__; \
+ register unsigned long _temp__; \
asm volatile("mov %%cr4, %0" : "=r" (_temp__)); \
_temp__; \
})
#define set_cr4(value) \
({ \
- register unsigned int _temp__ = (value); \
+ register unsigned long _temp__ = (value); \
asm volatile("mov %0, %%cr4" : : "r" (_temp__)); \
})
-#ifdef MACH_HYP
+#ifdef MACH_RING1
#define set_ts() \
hyp_fpu_taskswitch(1)
#define clear_ts() \
hyp_fpu_taskswitch(0)
-#else /* MACH_HYP */
+#else /* MACH_RING1 */
#define set_ts() \
set_cr0(get_cr0() | CR0_TS)
#define clear_ts() \
asm volatile("clts")
-#endif /* MACH_HYP */
+#endif /* MACH_RING1 */
#define get_tr() \
({ \
diff --git a/i386/i386/seg.h b/i386/i386/seg.h
index a7f65736..163a1c52 100644
--- a/i386/i386/seg.h
+++ b/i386/i386/seg.h
@@ -38,11 +38,11 @@
*/
/* Note: the value of KERNEL_RING is handled by hand in locore.S */
-#ifdef MACH_HYP
+#ifdef MACH_RING1
#define KERNEL_RING 1
-#else /* MACH_HYP */
+#else /* MACH_RING1 */
#define KERNEL_RING 0
-#endif /* MACH_HYP */
+#endif /* MACH_RING1 */
#ifndef __ASSEMBLER__
@@ -161,12 +161,12 @@ MACH_INLINE void
fill_descriptor(struct real_descriptor *_desc, unsigned base, unsigned limit,
unsigned char access, unsigned char sizebits)
{
- /* TODO: when !MACH_XEN, setting desc and just memcpy isn't simpler actually */
-#ifdef MACH_XEN
+ /* TODO: when !MACH_PV_DESCRIPTORS, setting desc and just memcpy isn't simpler actually */
+#ifdef MACH_PV_DESCRIPTORS
struct real_descriptor __desc, *desc = &__desc;
-#else /* MACH_XEN */
+#else /* MACH_PV_DESCRIPTORS */
struct real_descriptor *desc = _desc;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
if (limit > 0xfffff)
{
limit >>= 12;
@@ -179,10 +179,10 @@ fill_descriptor(struct real_descriptor *_desc, unsigned base, unsigned limit,
desc->limit_high = limit >> 16;
desc->granularity = sizebits;
desc->base_high = base >> 24;
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
if (hyp_do_update_descriptor(kv_to_ma(_desc), *(uint64_t*)desc))
panic("couldn't update descriptor(%p to %08lx%08lx)\n", (vm_offset_t) kv_to_ma(_desc), *(((unsigned long*)desc)+1), *(unsigned long *)desc);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
}
/* Fill a gate with particular values. */
diff --git a/i386/i386/spl.S b/i386/i386/spl.S
index f1d4b45f..3c075092 100644
--- a/i386/i386/spl.S
+++ b/i386/i386/spl.S
@@ -143,7 +143,7 @@ ENTRY(spl7)
SETIPL(SPL7)
ENTRY(splx)
- movl 4(%esp),%edx /* get ipl */
+ movl S_ARG0,%edx /* get ipl */
testl %edx,%edx /* spl0? */
jz EXT(spl0) /* yes, handle specially */
cmpl EXT(curr_ipl),%edx /* same ipl as current? */
@@ -160,7 +160,7 @@ ENTRY(splx)
.align TEXT_ALIGN
.globl splx_cli
splx_cli:
- movl 4(%esp),%edx /* get ipl */
+ movl S_ARG0,%edx /* get ipl */
cli /* disable interrupts */
testl %edx,%edx /* spl0? */
jnz 2f /* no, skip */
diff --git a/i386/i386/task.h b/i386/i386/task.h
index ca8de044..0060ad4e 100644
--- a/i386/i386/task.h
+++ b/i386/i386/task.h
@@ -24,7 +24,7 @@
#define _I386_TASK_H_
#include <kern/kern_types.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
/* The machine specific data of a task. */
struct machine_task
@@ -41,7 +41,7 @@ struct machine_task
typedef struct machine_task machine_task_t;
-extern zone_t machine_task_iopb_zone;
+extern struct kmem_cache machine_task_iopb_cache;
/* Initialize the machine task module. The function is called once at
start up by task_init in kern/task.c. */
diff --git a/i386/i386/thread.h b/i386/i386/thread.h
index f2ae8bf0..eddd25c2 100644
--- a/i386/i386/thread.h
+++ b/i386/i386/thread.h
@@ -51,31 +51,31 @@
*/
struct i386_saved_state {
- unsigned int gs;
- unsigned int fs;
- unsigned int es;
- unsigned int ds;
- unsigned int edi;
- unsigned int esi;
- unsigned int ebp;
- unsigned int cr2; /* kernel esp stored by pusha -
+ unsigned long gs;
+ unsigned long fs;
+ unsigned long es;
+ unsigned long ds;
+ unsigned long edi;
+ unsigned long esi;
+ unsigned long ebp;
+ unsigned long cr2; /* kernel esp stored by pusha -
we save cr2 here later */
- unsigned int ebx;
- unsigned int edx;
- unsigned int ecx;
- unsigned int eax;
- unsigned int trapno;
- unsigned int err;
- unsigned int eip;
- unsigned int cs;
- unsigned int efl;
- unsigned int uesp;
- unsigned int ss;
+ unsigned long ebx;
+ unsigned long edx;
+ unsigned long ecx;
+ unsigned long eax;
+ unsigned long trapno;
+ unsigned long err;
+ unsigned long eip;
+ unsigned long cs;
+ unsigned long efl;
+ unsigned long uesp;
+ unsigned long ss;
struct v86_segs {
- unsigned int v86_es; /* virtual 8086 segment registers */
- unsigned int v86_ds;
- unsigned int v86_fs;
- unsigned int v86_gs;
+ unsigned long v86_es; /* virtual 8086 segment registers */
+ unsigned long v86_ds;
+ unsigned long v86_fs;
+ unsigned long v86_gs;
} v86_segs;
};
@@ -97,12 +97,12 @@ struct i386_exception_link {
*/
struct i386_kernel_state {
- int k_ebx; /* kernel context */
- int k_esp;
- int k_ebp;
- int k_edi;
- int k_esi;
- int k_eip;
+ long k_ebx; /* kernel context */
+ long k_esp;
+ long k_ebp;
+ long k_edi;
+ long k_esi;
+ long k_eip;
};
/*
@@ -144,16 +144,16 @@ struct v86_assist_state {
*/
struct i386_interrupt_state {
- int gs;
- int fs;
- int es;
- int ds;
- int edx;
- int ecx;
- int eax;
- int eip;
- int cs;
- int efl;
+ long gs;
+ long fs;
+ long es;
+ long ds;
+ long edx;
+ long ecx;
+ long eax;
+ long eip;
+ long cs;
+ long efl;
};
/*
diff --git a/i386/i386/trap.c b/i386/i386/trap.c
index 22dd4914..c6aab488 100644
--- a/i386/i386/trap.c
+++ b/i386/i386/trap.c
@@ -33,6 +33,7 @@
#include <mach/machine/eflags.h>
#include <i386/trap.h>
#include <i386/fpu.h>
+#include <i386/locore.h>
#include <i386/model_dep.h>
#include <intel/read_fault.h>
#include <machine/machspl.h> /* for spl_t */
@@ -122,25 +123,6 @@ user_page_fault_continue(kr)
/*NOTREACHED*/
}
-/*
- * Fault recovery in copyin/copyout routines.
- */
-struct recovery {
- int fault_addr;
- int recover_addr;
-};
-
-extern struct recovery recover_table[];
-extern struct recovery recover_table_end[];
-
-/*
- * Recovery from Successful fault in copyout does not
- * return directly - it retries the pte check, since
- * the 386 ignores write protection in kernel mode.
- */
-extern struct recovery retry_table[];
-extern struct recovery retry_table_end[];
-
static char *trap_type[] = {
"Divide error",
@@ -235,10 +217,10 @@ dump_ss(regs);
printf("now %08x\n", subcode);
#endif
if (trunc_page(subcode) == 0
- || (subcode >= (int)_start
- && subcode < (int)etext)) {
+ || (subcode >= (long)_start
+ && subcode < (long)etext)) {
printf("Kernel page fault at address 0x%x, "
- "eip = 0x%x\n",
+ "eip = 0x%lx\n",
subcode, regs->eip);
goto badtrap;
}
@@ -295,6 +277,7 @@ dump_ss(regs);
*/
register struct recovery *rp;
+ /* Linear searching; but the list is small enough. */
for (rp = retry_table; rp < retry_table_end; rp++) {
if (regs->eip == rp->fault_addr) {
regs->eip = rp->recover_addr;
@@ -311,6 +294,7 @@ dump_ss(regs);
{
register struct recovery *rp;
+ /* Linear searching; but the list is small enough. */
for (rp = recover_table;
rp < recover_table_end;
rp++) {
@@ -344,7 +328,7 @@ dump_ss(regs);
printf("%s trap", trap_type[type]);
else
printf("trap %d", type);
- printf(", eip 0x%x\n", regs->eip);
+ printf(", eip 0x%lx\n", regs->eip);
#if MACH_TTD
if (kttd_enabled && kttd_trap(type, code, regs))
return;
@@ -378,7 +362,7 @@ int user_trap(regs)
if ((vm_offset_t)thread < phys_last_addr) {
printf("user_trap: bad thread pointer 0x%p\n", thread);
- printf("trap type %d, code 0x%x, va 0x%x, eip 0x%x\n",
+ printf("trap type %ld, code 0x%lx, va 0x%lx, eip 0x%lx\n",
regs->trapno, regs->err, regs->cr2, regs->eip);
asm volatile ("1: hlt; jmp 1b");
}
@@ -526,7 +510,7 @@ printf("user trap %d error %d sub %08x\n", type, code, subcode);
/*NOTREACHED*/
break;
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
case 15:
{
static unsigned count = 0;
@@ -556,7 +540,7 @@ printf("user trap %d error %d sub %08x\n", type, code, subcode);
return 0;
#endif /* MACH_KDB */
splhigh();
- printf("user trap, type %d, code = %x\n",
+ printf("user trap, type %d, code = %lx\n",
type, regs->err);
dump_ss(regs);
panic("trap");
@@ -601,7 +585,7 @@ i386_astintr()
int mycpu = cpu_number();
(void) splsched(); /* block interrupts to check reasons */
-#ifndef MACH_XEN
+#ifndef MACH_RING1
if (need_ast[mycpu] & AST_I386_FP) {
/*
* AST was for delayed floating-point exception -
@@ -614,7 +598,7 @@ i386_astintr()
fpastintr();
}
else
-#endif /* MACH_XEN */
+#endif /* MACH_RING1 */
{
/*
* Not an FPU trap. Handle the AST.
diff --git a/i386/i386/user_ldt.c b/i386/i386/user_ldt.c
index dfe6b1e6..74c10a4c 100644
--- a/i386/i386/user_ldt.c
+++ b/i386/i386/user_ldt.c
@@ -196,17 +196,17 @@ i386_set_ldt(thread, first_selector, desc_list, count, desc_list_inline)
if (new_ldt == 0) {
simple_unlock(&pcb->lock);
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
/* LDT needs to be aligned on a page */
vm_offset_t alloc = kalloc(ldt_size_needed + PAGE_SIZE + offsetof(struct user_ldt, ldt));
new_ldt = (user_ldt_t) (round_page((alloc + offsetof(struct user_ldt, ldt))) - offsetof(struct user_ldt, ldt));
new_ldt->alloc = alloc;
-#else /* MACH_XEN */
+#else /* MACH_PV_DESCRIPTORS */
new_ldt = (user_ldt_t)
kalloc(ldt_size_needed
+ sizeof(struct real_descriptor));
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
/*
* Build a descriptor that describes the
* LDT itself
@@ -272,19 +272,21 @@ i386_set_ldt(thread, first_selector, desc_list, count, desc_list_inline)
simple_unlock(&pcb->lock);
if (new_ldt)
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
{
int i;
+#ifdef MACH_PV_PAGETABLES
for (i=0; i<(new_ldt->desc.limit_low + 1)/sizeof(struct real_descriptor); i+=PAGE_SIZE/sizeof(struct real_descriptor))
pmap_set_page_readwrite(&new_ldt->ldt[i]);
+#endif /* MACH_PV_PAGETABLES*/
kfree(new_ldt->alloc, new_ldt->desc.limit_low + 1
+ PAGE_SIZE + offsetof(struct user_ldt, ldt));
}
-#else /* MACH_XEN */
+#else /* MACH_PV_DESCRIPTORS */
kfree((vm_offset_t)new_ldt,
new_ldt->desc.limit_low + 1
+ sizeof(struct real_descriptor));
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
/*
* Free the descriptor list, if it was
@@ -417,17 +419,19 @@ void
user_ldt_free(user_ldt)
user_ldt_t user_ldt;
{
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
int i;
+#ifdef MACH_PV_PAGETABLES
for (i=0; i<(user_ldt->desc.limit_low + 1)/sizeof(struct real_descriptor); i+=PAGE_SIZE/sizeof(struct real_descriptor))
pmap_set_page_readwrite(&user_ldt->ldt[i]);
+#endif /* MACH_PV_PAGETABLES */
kfree(user_ldt->alloc, user_ldt->desc.limit_low + 1
+ PAGE_SIZE + offsetof(struct user_ldt, ldt));
-#else /* MACH_XEN */
+#else /* MACH_PV_DESCRIPTORS */
kfree((vm_offset_t)user_ldt,
user_ldt->desc.limit_low + 1
+ sizeof(struct real_descriptor));
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
}
diff --git a/i386/i386/user_ldt.h b/i386/i386/user_ldt.h
index 8d16ed87..6c6c858e 100644
--- a/i386/i386/user_ldt.h
+++ b/i386/i386/user_ldt.h
@@ -36,9 +36,9 @@
#include <i386/seg.h>
struct user_ldt {
-#ifdef MACH_XEN
+#ifdef MACH_PV_DESCRIPTORS
vm_offset_t alloc; /* allocation before alignment */
-#endif /* MACH_XEN */
+#endif /* MACH_PV_DESCRIPTORS */
struct real_descriptor desc; /* descriptor for self */
struct real_descriptor ldt[1]; /* descriptor table (variable) */
};
diff --git a/i386/i386/vm_param.h b/i386/i386/vm_param.h
index 95df6044..ffd91d65 100644
--- a/i386/i386/vm_param.h
+++ b/i386/i386/vm_param.h
@@ -25,25 +25,41 @@
/* XXX use xu/vm_param.h */
#include <mach/vm_param.h>
+#ifdef MACH_PV_PAGETABLES
#include <xen/public/xen.h>
+#endif
+
+/* The kernel address space is usually 1GB, usually starting at virtual address 0. */
+/* This can be changed freely to separate kernel addresses from user addresses
+ * for better trace support in kdb; the _START symbol has to be offset by the
+ * same amount. */
+#define VM_MIN_KERNEL_ADDRESS 0xC0000000UL
-/* The kernel address space is 1GB, starting at virtual address 0. */
#ifdef MACH_XEN
-#define VM_MIN_KERNEL_ADDRESS 0x20000000UL
+/* PV kernels can be loaded directly to the target virtual address */
+#define INIT_VM_MIN_KERNEL_ADDRESS VM_MIN_KERNEL_ADDRESS
#else /* MACH_XEN */
-#define VM_MIN_KERNEL_ADDRESS 0x00000000UL
+/* This must remain 0 */
+#define INIT_VM_MIN_KERNEL_ADDRESS 0x00000000UL
#endif /* MACH_XEN */
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
#if PAE
#define HYP_VIRT_START HYPERVISOR_VIRT_START_PAE
#else /* PAE */
#define HYP_VIRT_START HYPERVISOR_VIRT_START_NONPAE
#endif /* PAE */
#define VM_MAX_KERNEL_ADDRESS (HYP_VIRT_START - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
#define VM_MAX_KERNEL_ADDRESS (LINEAR_MAX_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
+
+/* Reserve mapping room for kmem. */
+#ifdef MACH_XEN
+#define VM_KERNEL_MAP_SIZE (224 * 1024 * 1024)
+#else
+#define VM_KERNEL_MAP_SIZE (192 * 1024 * 1024)
+#endif
/* The kernel virtual address space is actually located
at high linear addresses.
@@ -51,14 +67,14 @@
#define LINEAR_MIN_KERNEL_ADDRESS (VM_MAX_ADDRESS)
#define LINEAR_MAX_KERNEL_ADDRESS (0xffffffffUL)
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
/* need room for mmu updates (2*8bytes) */
#define KERNEL_STACK_SIZE (4*I386_PGBYTES)
#define INTSTACK_SIZE (4*I386_PGBYTES)
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
#define KERNEL_STACK_SIZE (1*I386_PGBYTES)
#define INTSTACK_SIZE (1*I386_PGBYTES)
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
/* interrupt stack size */
/*
diff --git a/i386/i386/xen.h b/i386/i386/xen.h
index 731e5a2f..5bdaf0b8 100644
--- a/i386/i386/xen.h
+++ b/i386/i386/xen.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006, 2009, 2010, 2011 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2011 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,6 +25,7 @@
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/inline.h>
+#include <mach/xen.h>
#include <machine/vm_param.h>
#include <intel/pmap.h>
#include <kern/debug.h>
@@ -37,7 +38,7 @@
#define wmb() mb()
MACH_INLINE unsigned long xchgl(volatile unsigned long *ptr, unsigned long x)
{
- __asm__ __volatile__("xchgl %0, %1"
+ __asm__ __volatile__("xchg %0, %1"
: "=r" (x)
: "m" (*(ptr)), "0" (x): "memory");
return x;
@@ -151,6 +152,7 @@ MACH_INLINE type hyp_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type
_hypcall1(long, set_trap_table, vm_offset_t /* struct trap_info * */, traps);
+#ifdef MACH_PV_PAGETABLES
_hypcall4(int, mmu_update, vm_offset_t /* struct mmu_update * */, req, int, count, vm_offset_t /* int * */, success_count, domid_t, domid)
MACH_INLINE int hyp_mmu_update_pte(pt_entry_t pte, pt_entry_t val)
{
@@ -169,6 +171,7 @@ MACH_INLINE int hyp_mmu_update_pte(pt_entry_t pte, pt_entry_t val)
#define hyp_mmu_update_la(la, val) hyp_mmu_update_pte( \
(kernel_pmap->dirbase[lin2pdenum((vm_offset_t)(la))] & INTEL_PTE_PFN) \
+ ptenum((vm_offset_t)(la)) * sizeof(pt_entry_t), val)
+#endif
_hypcall2(long, set_gdt, vm_offset_t /* unsigned long * */, frame_list, unsigned int, entries)
@@ -215,6 +218,7 @@ MACH_INLINE void hyp_free_page(unsigned long pfn, void *va)
/* save mfn */
unsigned long mfn = pfn_to_mfn(pfn);
+#ifdef MACH_PV_PAGETABLES
/* remove from mappings */
if (hyp_do_update_va_mapping(kvtolin(va), 0, UVMF_INVLPG|UVMF_ALL))
panic("couldn't clear page %d at %p\n", pfn, va);
@@ -223,11 +227,13 @@ MACH_INLINE void hyp_free_page(unsigned long pfn, void *va)
/* drop machine page */
mfn_list[pfn] = ~0;
#endif /* MACH_PSEUDO_PHYS */
+#endif
/* and free from Xen */
hyp_free_mfn(mfn);
}
+#ifdef MACH_PV_PAGETABLES
_hypcall4(int, mmuext_op, vm_offset_t /* struct mmuext_op * */, op, int, count, vm_offset_t /* int * */, success_count, domid_t, domid);
MACH_INLINE int hyp_mmuext_op_void(unsigned int cmd)
{
@@ -273,6 +279,7 @@ MACH_INLINE void hyp_invlpg(vm_offset_t lin) {
if (n < 1)
panic("couldn't invlpg\n");
}
+#endif
_hypcall2(long, set_timer_op, unsigned long, absolute_lo, unsigned long, absolute_hi);
#define hyp_do_set_timer_op(absolute_nsec) ({ \
diff --git a/i386/i386/zalloc.h b/i386/i386/zalloc.h
deleted file mode 100644
index bf7cf6b2..00000000
--- a/i386/i386/zalloc.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 1996-1994 The University of Utah and
- * the Computer Systems Laboratory (CSL). All rights reserved.
- *
- * Permission to use, copy, modify and distribute this software is hereby
- * granted provided that (1) source code retains these copyright, permission,
- * and disclaimer notices, and (2) redistributions including binaries
- * reproduce the notices in supporting documentation, and (3) all advertising
- * materials mentioning features or use of this software display the following
- * acknowledgement: ``This product includes software developed by the
- * Computer Systems Laboratory at the University of Utah.''
- *
- * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
- * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
- * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * CSL requests users of this software to return to csl-dist@cs.utah.edu any
- * improvements that they make and grant CSL redistribution rights.
- *
- * Utah $Hdr: zalloc.h 1.4 94/12/16$
- * Author: Bryan Ford
- */
-
-#ifndef _I386_ZALLOC_H_
-#define _I386_ZALLOC_H_
-
-#include <kern/zalloc.h>
-
-#endif /* _I386_ZALLOC_H_ */
diff --git a/i386/i386at/autoconf.c b/i386/i386at/autoconf.c
index 7713dec6..93c71412 100644
--- a/i386/i386at/autoconf.c
+++ b/i386/i386at/autoconf.c
@@ -25,16 +25,7 @@
*/
#include <kern/printf.h>
-#ifdef MACH_KERNEL
#include <mach/std_types.h>
-#else /* MACH_KERNEL */
-#include <cpus.h>
-#include <platforms.h>
-#include <generic.h>
-#include <sys/param.h>
-#include <mach/machine.h>
-#include <machine/cpu.h>
-#endif /* MACH_KERNEL */
#include <i386/pic.h>
#include <i386/ipl.h>
#include <chips/busses.h>
@@ -144,7 +135,7 @@ void take_dev_irq(
printf("The device below will clobber IRQ %d.\n", pic);
printf("You have two devices at the same IRQ.\n");
printf("This won't work. Reconfigure your hardware and try again.\n");
- printf("%s%d: port = %x, spl = %d, pic = %d.\n",
+ printf("%s%d: port = %lx, spl = %ld, pic = %d.\n",
dev->name, dev->unit, dev->address,
dev->sysdep, dev->sysdep1);
while (1);
diff --git a/i386/i386at/boothdr.S b/i386/i386at/boothdr.S
index 45cd599f..567851e4 100644
--- a/i386/i386at/boothdr.S
+++ b/i386/i386at/boothdr.S
@@ -39,6 +39,19 @@ boot_hdr:
#endif /* __ELF__ */
boot_entry:
+ /* use segmentation to offset ourself. */
+ lgdt boot_gdt_descr - KERNELBASE
+ ljmp $8,$0f
+0:
+ movw $0,%ax
+ movw %ax,%ds
+ movw %ax,%es
+ movw %ax,%fs
+ movw %ax,%gs
+ movw $16,%ax
+ movw %ax,%ds
+ movw %ax,%es
+ movw %ax,%ss
/* Switch to our own interrupt stack. */
movl $_intstack+INTSTACK_SIZE,%esp
@@ -80,3 +93,27 @@ iplt_done:
.comm _intstack,INTSTACK_SIZE
+.align 16
+ .word 0
+boot_gdt_descr:
+ .word 3*8+7
+ .long boot_gdt - KERNELBASE
+.align 16
+boot_gdt:
+ /* 0 */
+ .quad 0
+ /* boot CS = 8 */
+ .word 0xffff
+ .word (-KERNELBASE) & 0xffff
+ .byte ((-KERNELBASE) >> 16) & 0xff
+ .byte 0x9a
+ .byte 0xcf
+ .byte ((-KERNELBASE) >> 24) & 0xff
+ /* boot DS = 8 */
+ .word 0xffff
+ .word (-KERNELBASE) & 0xffff
+ .byte ((-KERNELBASE) >> 16) & 0xff
+ .byte 0x92
+ .byte 0xcf
+ .byte ((-KERNELBASE) >> 24) & 0xff
+
diff --git a/i386/i386at/com.c b/i386/i386at/com.c
index 325e0991..93c3faaa 100644
--- a/i386/i386at/com.c
+++ b/i386/i386at/com.c
@@ -35,7 +35,7 @@
#include <kern/mach_clock.h>
#include <sys/time.h>
#include <device/conf.h>
-#include <device/errno.h>
+#include <device/device_types.h>
#include <device/tty.h>
#include <device/io_req.h>
@@ -70,10 +70,6 @@ int comtimer_state[NCOM];
#define RCBAUD B9600
static int rcline = -1;
static struct bus_device *comcndev;
-int comcnprobe(struct consdev *cp);
-int comcninit(struct consdev *cp);
-int comcngetc(dev_t dev, int wait);
-int comcnputc(dev_t dev, int c);
/* XX */
extern char *kernel_cmdline;
@@ -215,6 +211,8 @@ comcnprobe(struct consdev *cp)
cp->cn_dev = makedev(maj, unit);
cp->cn_pri = pri;
+
+ return 0;
}
@@ -231,7 +229,7 @@ comattach(struct bus_device *dev)
u_short addr = dev->address;
take_dev_irq(dev);
- printf(", port = %x, spl = %d, pic = %d. (DOS COM%d)",
+ printf(", port = %lx, spl = %ld, pic = %d. (DOS COM%d)",
dev->address, dev->sysdep, dev->sysdep1, unit+1);
/* comcarrier[unit] = addr->flags;*/
@@ -267,7 +265,7 @@ comcninit(struct consdev *cp)
outb(LINE_CTL(addr), iDLAB);
outb(BAUD_LSB(addr), divisorreg[RCBAUD] & 0xff);
outb(BAUD_MSB(addr), divisorreg[RCBAUD] >>8);
- outb(LINE_CTL(addr), i7BITS|iPEN);
+ outb(LINE_CTL(addr), i8BITS);
outb(INTR_ENAB(addr), 0);
outb(MODEM_CTL(addr), iDTR|iRTS|iOUT2);
@@ -287,6 +285,7 @@ comcninit(struct consdev *cp)
}
}
+ return 0;
}
/*
@@ -336,13 +335,13 @@ io_return_t comopen(
io_return_t result;
if (unit >= NCOM)
- return ENXIO; /* no such device */
+ return D_NO_SUCH_DEVICE; /* no such device */
if ((isai = cominfo[unit]) == 0 || isai->alive == 0) {
/*
* Try to probe it again
*/
if (!com_reprobe(unit))
- return ENXIO;
+ return D_NO_SUCH_DEVICE;
}
tp = &com_tty[unit];
@@ -448,7 +447,7 @@ comgetstat(dev, flavor, data, count)
dev_t dev;
int flavor;
int *data; /* pointer to OUT array */
-unsigned int *count; /* out */
+natural_t *count; /* out */
{
io_return_t result = D_SUCCESS;
int unit = minor(dev);
@@ -471,7 +470,7 @@ comsetstat(dev, flavor, data, count)
dev_t dev;
int flavor;
int * data;
-unsigned int count;
+natural_t count;
{
io_return_t result = D_SUCCESS;
int unit = minor(dev);
@@ -622,7 +621,9 @@ comstart(tp)
struct tty *tp;
{
char nch;
+#if 0
int i;
+#endif
if (tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) {
comst_1++;
@@ -656,7 +657,7 @@ comst_4++;
#else
nch = getc(&tp->t_outq);
if ((nch & 0200) && ((tp->t_flags & LITOUT) == 0)) {
- timeout(ttrstrt, (char *)tp, (nch & 0x7f) + 6);
+ timeout((timer_func_t *)ttrstrt, (char *)tp, (nch & 0x7f) + 6);
tp->t_state |= TS_TIMEOUT;
comst_4++;
return;
@@ -884,6 +885,8 @@ comcnputc(dev_t dev, int c)
if (c == '\n')
comcnputc(dev, '\r');
outb(addr, c);
+
+ return 0;
}
int
diff --git a/i386/i386at/com.h b/i386/i386at/com.h
index e53e9482..49f23eec 100644
--- a/i386/i386at/com.h
+++ b/i386/i386at/com.h
@@ -27,6 +27,7 @@
#define _COM_H_
#include <mach/std_types.h>
+#include <device/cons.h>
/*
* Set receive modem state from modem status register.
@@ -42,4 +43,9 @@ extern void commodem_intr(int unit, int stat);
extern int comgetc(int unit);
+extern int comcnprobe(struct consdev *cp);
+extern int comcninit(struct consdev *cp);
+extern int comcngetc(dev_t dev, int wait);
+extern int comcnputc(dev_t dev, int c);
+
#endif /* _COM_H_ */
diff --git a/i386/i386at/cons_conf.c b/i386/i386at/cons_conf.c
index 8a4b37c5..cf42bb63 100644
--- a/i386/i386at/cons_conf.c
+++ b/i386/i386at/cons_conf.c
@@ -31,11 +31,11 @@
#include <device/cons.h>
#ifdef MACH_HYP
-extern int hypcnprobe(), hypcninit(), hypcngetc(), hypcnputc();
+#include <xen/console.h>
#else /* MACH_HYP */
-extern int kdcnprobe(), kdcninit(), kdcngetc(), kdcnputc();
+#include "kd.h"
#if NCOM > 0
-extern int comcnprobe(), comcninit(), comcngetc(), comcnputc();
+#include "com.h"
#endif
#endif /* MACH_HYP */
diff --git a/i386/i386at/kd.c b/i386/i386at/kd.c
index e4ac8325..c9778629 100644
--- a/i386/i386at/kd.c
+++ b/i386/i386at/kd.c
@@ -85,6 +85,7 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <device/io_req.h>
#include <device/buf.h> /* for struct uio (!) */
#include <vm/vm_kern.h>
+#include <i386/locore.h>
#include <i386/loose_ends.h>
#include <i386/vm_param.h>
#include <i386/machspl.h>
@@ -112,10 +113,6 @@ static void charput(), charmvup(), charmvdown(), charclear(), charsetcursor();
static void kd_noopreset();
boolean_t kdcheckmagic();
-int kdcnprobe(struct consdev *cp);
-int kdcninit(struct consdev *cp);
-int kdcngetc(dev_t dev, int wait);
-void kdcnputc(dev_t dev, int c);
int do_modifier (int, Scancode, boolean_t);
/*
@@ -595,7 +592,7 @@ io_return_t kdgetstat(dev, flavor, data, count)
dev_t dev;
int flavor;
int * data; /* pointer to OUT array */
- unsigned int *count; /* OUT */
+ natural_t *count; /* OUT */
{
io_return_t result;
@@ -625,7 +622,7 @@ io_return_t kdsetstat(dev, flavor, data, count)
dev_t dev;
int flavor;
int * data;
- unsigned int count;
+ natural_t count;
{
io_return_t result;
@@ -735,9 +732,8 @@ int flags; /* flags set for console */
*/
/*ARGSUSED*/
void
-kdintr(vec, regs)
+kdintr(vec)
int vec;
-int regs;
{
struct tty *tp;
unsigned char c;
@@ -802,7 +798,7 @@ int regs;
goto done;
} else if (kd_kbd_mouse && kd_kbd_magic(scancode)) {
goto done;
- } else if (kdcheckmagic(scancode, &regs)) {
+ } else if (kdcheckmagic(scancode)) {
goto done;
} else if (kb_mode == KB_EVENT) {
kd_enqsc(scancode);
@@ -988,9 +984,8 @@ boolean_t up;
* are still held down.
*/
boolean_t
-kdcheckmagic(scancode, regs)
+kdcheckmagic(scancode)
Scancode scancode;
-int *regs;
{
static int magic_state = KS_NORMAL; /* like kd_state */
boolean_t up = FALSE;
@@ -2969,16 +2964,18 @@ kdcngetc(dev_t dev, int wait)
return kdcnmaygetc();
}
-void
+int
kdcnputc(dev_t dev, int c)
{
if (!kd_initialized)
- return;
+ return -1;
/* Note that tab is handled in kd_putc */
if (c == '\n')
kd_putc('\r');
kd_putc(c);
+
+ return 0;
}
/*
diff --git a/i386/i386at/kd.h b/i386/i386at/kd.h
index 98c8bfee..1d53538b 100644
--- a/i386/i386at/kd.h
+++ b/i386/i386at/kd.h
@@ -75,6 +75,7 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <mach/boolean.h>
#include <sys/types.h>
#include <sys/time.h>
+#include <device/cons.h>
/*
@@ -729,10 +730,16 @@ extern void kd_handle_ack (void);
extern int kd_kbd_magic (int);
extern int kdstate2idx (int, boolean_t);
extern void kd_parserest (u_char *);
+extern int kdcnprobe(struct consdev *cp);
+extern int kdcninit(struct consdev *cp);
+extern int kdcngetc(dev_t dev, int wait);
extern int kdcnmaygetc (void);
+extern int kdcnputc(dev_t dev, int c);
extern void kd_slmwd (void *start, int count, int value);
extern void kd_slmscu (void *from, void *to, int count);
extern void kd_slmscd (void *from, void *to, int count);
+extern void kdintr(int vec);
+
#endif /* _KD_H_ */
diff --git a/i386/i386at/kd_event.c b/i386/i386at/kd_event.c
index 3983a118..4d2ea008 100644
--- a/i386/i386at/kd_event.c
+++ b/i386/i386at/kd_event.c
@@ -59,20 +59,9 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <kern/printf.h>
#include <string.h>
-#ifdef MACH_KERNEL
#include <device/ds_routines.h>
-#include <device/errno.h>
+#include <device/device_types.h>
#include <device/io_req.h>
-#else /* MACH_KERNEL */
-#include <sys/file.h>
-#include <sys/errno.h>
-#include <kern/thread.h>
-#include <sys/user.h>
-#include <sys/proc.h>
-#include <sys/kernel.h>
-#include <sys/ioctl.h>
-#include <sys/tty.h>
-#endif /* MACH_KERNEL */
#include <i386/machspl.h>
#include <i386/pio.h>
#include <i386at/kd.h>
@@ -92,24 +81,12 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
kd_event_queue kbd_queue; /* queue of keyboard events */
-#ifdef MACH_KERNEL
queue_head_t kbd_read_queue = { &kbd_read_queue, &kbd_read_queue };
-#else /* MACH_KERNEL */
-struct proc *kbd_sel = 0; /* selecting process, if any */
-short kbdpgrp = 0; /* process group leader when dev is open */
-
-int kbdflag = 0;
-#define KBD_COLL 1 /* select collision */
-#define KBD_ASYNC 2 /* user wants asynch notification */
-#define KBD_NBIO 4 /* user wants non-blocking I/O */
-#endif /* MACH_KERNEL */
void kbd_enqueue();
-#ifdef MACH_KERNEL
io_return_t X_kdb_enter_init();
io_return_t X_kdb_exit_init();
-#endif /* MACH_KERNEL */
static boolean_t initialized = FALSE;
@@ -147,14 +124,6 @@ kbdopen(dev, flags)
splx(o_pri);
kbdinit();
-#ifdef MACH_KERNEL
-#else /* MACH_KERNEL */
- if (flags & FWRITE)
- return(ENODEV);
-
- if (kbdpgrp == 0)
- kbdpgrp = u.u_procp->p_pgrp;
-#endif /* MACH_KERNEL */
return(0);
}
@@ -173,18 +142,11 @@ kbdclose(dev, flags)
spl_t s = SPLKD();
kb_mode = KB_ASCII;
-#ifdef MACH_KERNEL
-#else /* MACH_KERNEL */
- kbdpgrp = 0;
- kbdflag = 0;
- kbd_sel = 0;
-#endif /* MACH_KERNEL */
kdq_reset(&kbd_queue);
splx(s);
}
-#ifdef MACH_KERNEL
io_return_t kbdgetstat(dev, flavor, data, count)
dev_t dev;
int flavor;
@@ -234,91 +196,12 @@ io_return_t kbdsetstat(dev, flavor, data, count)
return (D_SUCCESS);
}
-#else /* MACH_KERNEL */
-/*
- * kbdioctl - handling for asynch & non-blocking I/O.
- */
-
-/*ARGSUSED*/
-int
-kbdioctl(dev, cmd, data, flag)
- dev_t dev;
- int cmd;
- caddr_t data;
- int flag;
-{
- spl_t s = SPLKD();
- int err = 0;
-
- switch (cmd) {
- case KDSKBDMODE:
- kb_mode = *(int *)data;
- /* XXX - what to do about unread events? */
- /* XXX - should check that "data" contains an OK value */
- break;
- case KDGKBDTYPE:
- *(int *)data = KB_VANILLAKB;
- break;
- case K_X_KDB_ENTER:
- X_kdb_enter_init((struct X_kdb *) data);
- break;
- case K_X_KDB_EXIT:
- X_kdb_exit_init( (struct X_kdb *) data);
- break;
- case FIONBIO:
- if (*(int *)data)
- kbdflag |= KBD_NBIO;
- else
- kbdflag &= ~KBD_NBIO;
- break;
- case FIOASYNC:
- if (*(int *)data)
- kbdflag |= KBD_ASYNC;
- else
- kbdflag &= ~KBD_ASYNC;
- break;
- default:
- err = ENOTTY;
- break;
- }
-
- splx(s);
- return(err);
-}
-
-
-/*
- * kbdselect
- */
-
-/*ARGSUSED*/
-int
-kbdselect(dev, rw)
-{
- spl_t s = SPLKD();
-
- if (!kdq_empty(&kbd_queue)) {
- splx(s);
- return(1);
- }
-
- if (kbd_sel)
- kbdflag |= KBD_COLL;
- else
- kbd_sel = (struct proc *)current_thread();
- /* eeeyuck */
-
- splx(s);
- return(0);
-}
-#endif /* MACH_KERNEL */
/*
* kbdread - dequeue and return any queued events.
*/
-#ifdef MACH_KERNEL
boolean_t kbd_read_done(); /* forward */
int
@@ -391,44 +274,6 @@ boolean_t kbd_read_done(ior)
return (TRUE);
}
-#else /* MACH_KERNEL */
-/*ARGSUSED*/
-kbdread(dev, uio)
- dev_t dev;
- struct uio *uio;
-{
- int s = SPLKD();
- int err = 0;
- kd_event *ev;
- int i;
- char *cp;
-
- if (kdq_empty(&kbd_queue))
- if (kbdflag & KBD_NBIO) {
- err = EWOULDBLOCK;
- goto done;
- } else
- while (kdq_empty(&kbd_queue)) {
- splx(s);
- sleep((caddr_t)&kbd_queue, TTIPRI);
- s = SPLKD();
- }
-
- while (!kdq_empty(&kbd_queue) && uio->uio_resid >= sizeof(kd_event)) {
- ev = kdq_get(&kbd_queue);
- for (cp = (char *)ev, i = 0; i < sizeof(kd_event);
- ++i, ++cp) {
- err = ureadc(*cp, uio);
- if (err)
- goto done;
- }
- }
-
-done:
- splx(s);
- return(err);
-}
-#endif /* MACH_KERNEL */
/*
@@ -462,22 +307,11 @@ kbd_enqueue(ev)
else
kdq_put(&kbd_queue, ev);
-#ifdef MACH_KERNEL
{
register io_req_t ior;
while ((ior = (io_req_t)dequeue_head(&kbd_read_queue)) != 0)
iodone(ior);
}
-#else /* MACH_KERNEL */
- if (kbd_sel) {
- selwakeup(kbd_sel, kbdflag & KBD_COLL);
- kbd_sel = 0;
- kbdflag &= ~KBD_COLL;
- }
- if (kbdflag & KBD_ASYNC)
- gsignal(kbdpgrp, SIGIO);
- wakeup((caddr_t)&kbd_queue);
-#endif /* MACH_KERNEL */
}
u_int X_kdb_enter_str[512], X_kdb_exit_str[512];
@@ -538,7 +372,6 @@ register u_int *u_ip, *endp;
kdb_in_out(u_ip);
}
-#ifdef MACH_KERNEL
io_return_t
X_kdb_enter_init(data, count)
u_int *data;
@@ -564,28 +397,3 @@ X_kdb_exit_init(data, count)
X_kdb_exit_len = count;
return D_SUCCESS;
}
-#else /* MACH_KERNEL */
-void
-X_kdb_enter_init(kp)
-struct X_kdb *kp;
-{
- if (kp->size > sizeof X_kdb_enter_str)
- u.u_error = ENOENT;
- else if(copyin(kp->ptr, X_kdb_enter_str, kp->size) == EFAULT)
- u.u_error = EFAULT;
-
- X_kdb_enter_len = kp->size>>2;
-}
-
-void
-X_kdb_exit_init(kp)
-struct X_kdb *kp;
-{
- if (kp->size > sizeof X_kdb_exit_str)
- u.u_error = ENOENT;
- else if(copyin(kp->ptr, X_kdb_exit_str, kp->size) == EFAULT)
- u.u_error = EFAULT;
-
- X_kdb_exit_len = kp->size>>2;
-}
-#endif /* MACH_KERNEL */
diff --git a/i386/i386at/kd_mouse.c b/i386/i386at/kd_mouse.c
index 640209c5..6e7b68a8 100644
--- a/i386/i386at/kd_mouse.c
+++ b/i386/i386at/kd_mouse.c
@@ -67,21 +67,10 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <mach/boolean.h>
#include <sys/types.h>
#include <kern/printf.h>
-#ifdef MACH_KERNEL
#include <device/ds_routines.h>
-#include <device/errno.h>
+#include <device/device_types.h>
#include <device/io_req.h>
#include <device/subrs.h>
-#else /* MACH_KERNEL */
-#include <sys/file.h>
-#include <sys/errno.h>
-#include <kern/thread.h>
-#include <sys/user.h>
-#include <sys/proc.h>
-#include <sys/kernel.h>
-#include <sys/ioctl.h>
-#include <sys/tty.h>
-#endif /* MACH_KERNEL */
#include <i386/ipl.h>
#include <i386/pic.h>
#include <i386/pio.h>
@@ -100,20 +89,8 @@ extern struct bus_device *cominfo[];
kd_event_queue mouse_queue; /* queue of mouse events */
boolean_t mouse_in_use = FALSE;
-#ifdef MACH_KERNEL
queue_head_t mouse_read_queue = { &mouse_read_queue, &mouse_read_queue };
-#else /* MACH_KERNEL */
-struct proc *mouse_sel = 0; /* selecting process, if any */
-short mousepgrp = 0; /* process group leader when dev is open */
-#endif /* MACH_KERNEL */
-
-#ifdef MACH_KERNEL
-#else /* MACH_KERNEL */
-int mouseflag = 0;
-#define MOUSE_COLL 1 /* select collision */
-#define MOUSE_ASYNC 2 /* user wants asynch notification */
-#define MOUSE_NBIO 4 /* user wants non-blocking I/O */
-#endif /* MACH_KERNEL */
+
/*
* The state of the 3 buttons is encoded in the low-order 3 bits (both
@@ -176,20 +153,11 @@ mouseopen(dev, flags)
dev_t dev;
int flags;
{
-#ifdef MACH_KERNEL
-#else /* MACH_KERNEL */
- if (flags & FWRITE)
- return(ENODEV);
-#endif /* MACH_KERNEL */
if (mouse_in_use)
- return(EBUSY);
+ return (D_ALREADY_OPEN);
mouse_in_use = TRUE; /* locking? */
kdq_reset(&mouse_queue);
lastbuttons = MOUSE_ALL_UP;
-#ifdef MACH_KERNEL
-#else /* MACH_KERNEL */
- mousepgrp = u.u_procp->p_pgrp;
-#endif /* MACH_KERNEL */
switch (mouse_type = ((minor(dev) & 0xf8) >> 3)) {
case MICROSOFT_MOUSE7:
@@ -294,12 +262,6 @@ mouseclose(dev, flags)
kdq_reset(&mouse_queue); /* paranoia */
mouse_in_use = FALSE;
-#ifdef MACH_KERNEL
-#else /* MACH_KERNEL */
- mousepgrp = 0;
- mouseflag = 0;
- mouse_sel = 0;
-#endif /* MACH_KERNEL */
}
/*ARGSUSED*/
@@ -335,7 +297,6 @@ kd_mouse_close(dev, mouse_pic)
splx(s);
}
-#ifdef MACH_KERNEL
io_return_t mousegetstat(dev, flavor, data, count)
dev_t dev;
int flavor;
@@ -354,75 +315,10 @@ io_return_t mousegetstat(dev, flavor, data, count)
return D_SUCCESS;
}
-#else /* MACH_KERNEL */
-/*
- * mouseioctl - handling for asynch & non-blocking I/O.
- */
-
-/*ARGSUSED*/
-int
-mouseioctl(dev, cmd, data, flag)
- dev_t dev;
- int cmd;
- caddr_t data;
- int flag;
-{
- int s = SPLKD();
- int err = 0;
-
- switch (cmd) {
- case FIONBIO:
- if (*(int *)data)
- mouseflag |= MOUSE_NBIO;
- else
- mouseflag &= ~MOUSE_NBIO;
- break;
- case FIOASYNC:
- if (*(int *)data)
- mouseflag |= MOUSE_ASYNC;
- else
- mouseflag &= ~MOUSE_ASYNC;
- break;
- default:
- err = ENOTTY;
- break;
- }
-
- splx(s);
- return(err);
-}
-
-
-/*
- * mouseselect - check for pending events, etc.
- */
-
-/*ARGSUSED*/
-int
-mouseselect(dev, rw)
-{
- int s = SPLKD();
-
- if (!kdq_empty(&mouse_queue)) {
- splx(s);
- return(1);
- }
-
- if (mouse_sel)
- mouseflag |= MOUSE_COLL;
- else
- mouse_sel = (struct proc *)current_thread();
- /* eeeyuck */
-
- splx(s);
- return(0);
-}
-#endif /* MACH_KERNEL */
/*
* mouseread - dequeue and return any queued events.
*/
-#ifdef MACH_KERNEL
boolean_t mouse_read_done(); /* forward */
int
@@ -495,45 +391,6 @@ boolean_t mouse_read_done(ior)
return (TRUE);
}
-#else /* MACH_KERNEL */
-/*ARGSUSED*/
-int
-mouseread(dev, uio)
- dev_t dev;
- struct uio *uio;
-{
- int s = SPLKD();
- int err = 0;
- kd_event *ev;
- int i;
- char *cp;
-
- if (kdq_empty(&mouse_queue))
- if (mouseflag & MOUSE_NBIO) {
- err = EWOULDBLOCK;
- goto done;
- } else
- while (kdq_empty(&mouse_queue)) {
- splx(s);
- sleep((caddr_t)&mouse_queue, TTIPRI);
- s = SPLKD();
- }
-
- while (!kdq_empty(&mouse_queue) && uio->uio_resid >= sizeof(kd_event)) {
- ev = kdq_get(&mouse_queue);
- for (cp = (char *)ev, i = 0; i < sizeof(kd_event);
- ++i, ++cp) {
- err = ureadc(*cp, uio);
- if (err)
- goto done;
- }
- }
-
-done:
- splx(s);
- return(err);
-}
-#endif /* MACH_KERNEL */
/*
@@ -780,12 +637,8 @@ int kd_mouse_read(void)
while (mousebufindex <= mouse_char_index) {
mouse_char_wanted = TRUE;
-#ifdef MACH_KERNEL
assert_wait((event_t) &mousebuf, FALSE);
thread_block((void (*)()) 0);
-#else /* MACH_KERNEL */
- sleep(&mousebuf, PZERO);
-#endif /* MACH_KERNEL */
}
ch = mousebuf[mouse_char_index++];
@@ -955,20 +808,9 @@ mouse_enqueue(ev)
else
kdq_put(&mouse_queue, ev);
-#ifdef MACH_KERNEL
{
register io_req_t ior;
while ((ior = (io_req_t)dequeue_head(&mouse_read_queue)) != 0)
iodone(ior);
}
-#else /* MACH_KERNEL */
- if (mouse_sel) {
- selwakeup(mouse_sel, mouseflag & MOUSE_COLL);
- mouse_sel = 0;
- mouseflag &= ~MOUSE_COLL;
- }
- if (mouseflag & MOUSE_ASYNC)
- gsignal(mousepgrp, SIGIO);
- wakeup((caddr_t)&mouse_queue);
-#endif /* MACH_KERNEL */
}
diff --git a/i386/i386at/lpr.c b/i386/i386at/lpr.c
index 57f6f857..c92795ef 100644
--- a/i386/i386at/lpr.c
+++ b/i386/i386at/lpr.c
@@ -30,28 +30,15 @@
#if NLPR > 0
-#ifdef MACH_KERNEL
#include <mach/std_types.h>
#include <sys/types.h>
#include <kern/printf.h>
#include <kern/mach_clock.h>
#include <sys/time.h>
#include <device/conf.h>
-#include <device/errno.h>
+#include <device/device_types.h>
#include <device/tty.h>
#include <device/io_req.h>
-#else /* MACH_KERNEL */
-#include <sys/param.h>
-#include <sys/conf.h>
-#include <sys/dir.h>
-#include <sys/user.h>
-#include <sys/proc.h>
-#include <sys/ioctl.h>
-#include <sys/tty.h>
-#include <sys/systm.h>
-#include <sys/uio.h>
-#include <sys/file.h>
-#endif /* MACH_KERNEL */
#include <i386/ipl.h>
#include <i386/pio.h>
@@ -68,9 +55,7 @@ int lprprobe();
void lprstop();
void lprintr(), lprstart();
void lprattach(struct bus_device *);
-#ifdef MACH_KERNEL
int lprgetstat(), lprsetstat();
-#endif /* MACH_KERNEL */
void lprpr_addr();
struct bus_device *lprinfo[NLPR]; /* ??? */
@@ -117,7 +102,7 @@ void lprattach(struct bus_device *dev)
u_short addr = (u_short) dev->address;
take_dev_irq(dev);
- printf(", port = %x, spl = %d, pic = %d.",
+ printf(", port = %lx, spl = %ld, pic = %d.",
dev->address, dev->sysdep, dev->sysdep1);
lprinfo[unit] = dev;
@@ -130,9 +115,7 @@ int
lpropen(dev, flag, ior)
int dev;
int flag;
-#ifdef MACH_KERNEL
io_req_t ior;
-#endif /* MACH_KERNEL */
{
int unit = minor(dev);
struct bus_device *isai;
@@ -140,22 +123,16 @@ struct tty *tp;
u_short addr;
if (unit >= NLPR || (isai = lprinfo[unit]) == 0 || isai->alive == 0)
- return(ENXIO);
+ return (D_NO_SUCH_DEVICE);
tp = &lpr_tty[unit];
-#ifndef MACH_KERNEL
- if (tp->t_state & TS_XCLUDE && u.u_uid != 0)
- return(EBUSY);
-#endif /* MACH_KERNEL */
addr = (u_short) isai->address;
tp->t_dev = dev;
tp->t_addr = *(caddr_t *)&addr;
tp->t_oproc = lprstart;
tp->t_state |= TS_WOPEN;
-#ifdef MACH_KERNEL
tp->t_stop = lprstop;
tp->t_getstat = lprgetstat;
tp->t_setstat = lprsetstat;
-#endif /* MACH_KERNEL */
if ((tp->t_state & TS_ISOPEN) == 0)
ttychars(tp);
outb(INTR_ENAB(addr), inb(INTR_ENAB(addr)) | 0x10);
@@ -172,9 +149,6 @@ int unit = minor(dev);
struct tty *tp = &lpr_tty[unit];
u_short addr = (u_short) lprinfo[unit]->address;
-#ifndef MACH_KERNEL
- (*linesw[tp->t_line].l_close)(tp);
-#endif /* MACH_KERNEL */
ttyclose(tp);
if (tp->t_state&TS_HUPCLS || (tp->t_state&TS_ISOPEN)==0) {
outb(INTR_ENAB(addr), inb(INTR_ENAB(addr)) & 0x0f);
@@ -182,7 +156,6 @@ u_short addr = (u_short) lprinfo[unit]->address;
}
}
-#ifdef MACH_KERNEL
int
lprread(dev, ior)
int dev;
@@ -212,7 +185,7 @@ lprgetstat(dev, flavor, data, count)
dev_t dev;
int flavor;
int *data; /* pointer to OUT array */
-unsigned int *count; /* out */
+natural_t *count; /* out */
{
io_return_t result = D_SUCCESS;
int unit = minor(dev);
@@ -230,7 +203,7 @@ lprsetstat(dev, flavor, data, count)
dev_t dev;
int flavor;
int * data;
-unsigned int count;
+natural_t count;
{
io_return_t result = D_SUCCESS;
int unit = minor(dev);
@@ -244,43 +217,6 @@ unsigned int count;
}
return (D_SUCCESS);
}
-#else /* MACH_KERNEL */
-int lprwrite(dev, uio)
- int dev;
- struct uio *uio;
-{
- struct tty *tp= &lpr_tty[minor(dev)];
-
- return ((*linesw[tp->t_line].l_write)(tp, uio));
-}
-
-int lprioctl(dev, cmd, addr, mode)
- int dev;
- int cmd;
- caddr_t addr;
- int mode;
-{
- int error;
- spl_t s;
- int unit = minor(dev);
- struct tty *tp = &lpr_tty[unit];
-
- error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, addr,mode);
- if (error >= 0)
- return(error);
- error = ttioctl(tp, cmd, addr,mode);
- if (error >= 0)
- return (error);
- s = spltty();
- switch (cmd) {
- default:
- splx(s);
- return(ENOTTY);
- }
- splx(s);
- return(0);
-}
-#endif /* MACH_KERNEL */
void lprintr(unit)
int unit;
@@ -317,28 +253,15 @@ struct tty *tp;
}
if (tp->t_outq.c_cc <= TTLOWAT(tp)) {
-#ifdef MACH_KERNEL
tt_write_wakeup(tp);
-#else /* MACH_KERNEL */
- if (tp->t_state & TS_ASLEEP) {
- tp->t_state &= ~TS_ASLEEP;
- wakeup ((caddr_t)&tp->t_outq);
- }
- if (tp->t_wsel) {
- selwakeup(tp->t_wsel, tp->t_state & TS_WCOLL);
- tp->t_wsel = 0;
- tp->t_state &= ~TS_WCOLL;
- }
-#endif /* MACH_KERNEL */
}
if (tp->t_outq.c_cc == 0) {
splx(s);
return;
}
-#ifdef MACH_KERNEL
nch = getc(&tp->t_outq);
if ((tp->t_flags & LITOUT) == 0 && (nch & 0200)) {
- timeout(ttrstrt, (char *)tp, (nch & 0x7f) + 6);
+ timeout((timer_func_t *)ttrstrt, (char *)tp, (nch & 0x7f) + 6);
tp->t_state |= TS_TIMEOUT;
return;
}
@@ -346,32 +269,10 @@ struct tty *tp;
outb(INTR_ENAB(addr),inb(INTR_ENAB(addr)) | 0x01);
outb(INTR_ENAB(addr),inb(INTR_ENAB(addr)) & 0x1e);
tp->t_state |= TS_BUSY;
-#else /* MACH_KERNEL */
- if (tp->t_flags & (RAW|LITOUT))
- nch = ndqb(&tp->t_outq,0);
- else {
- nch = ndqb(&tp->t_outq, 0200);
- if (nch == 0) {
- nch = getc(&tp->t_outq);
- timeout(ttrstrt,(caddr_t)tp,(nch&0x7f)+6);
- tp->t_state |= TS_TIMEOUT;
- splx(s);
- return(0);
- }
- }
- if (nch) {
- nch=getc(&tp->t_outq);
- outb(DATA(addr), nch);
- outb(INTR_ENAB(addr),inb(INTR_ENAB(addr)) | 0x01);
- outb(INTR_ENAB(addr),inb(INTR_ENAB(addr)) & 0x1e);
- tp->t_state |= TS_BUSY;
- }
-#endif /* MACH_KERNEL */
splx(s);
return;
}
-#ifdef MACH_KERNEL
void
lprstop(tp, flags)
register struct tty *tp;
@@ -380,17 +281,6 @@ int flags;
if ((tp->t_state & TS_BUSY) && (tp->t_state & TS_TTSTOP) == 0)
tp->t_state |= TS_FLUSH;
}
-#else /* MACH_KERNEL */
-void lprstop(tp, flag)
-struct tty *tp;
-{
- int s = spltty();
-
- if ((tp->t_state&TS_BUSY) && (!(tp->t_state&TS_TTSTOP)))
- tp->t_state |= TS_FLUSH;
- splx(s);
-}
-#endif /* MACH_KERNEL */
int
lprpr(unit)
{
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index 04b8228f..3db03d76 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -128,8 +128,6 @@ extern void setup_main();
void halt_all_cpus (boolean_t reboot) __attribute__ ((noreturn));
void halt_cpu (void) __attribute__ ((noreturn));
-int discover_x86_cpu_type (void);
-
void inittodr(); /* forward */
int rebootflag = 0; /* exported to kdintr */
@@ -263,6 +261,8 @@ void db_reset_cpu(void)
void
mem_size_init(void)
{
+ vm_offset_t max_phys_size;
+
/* Physical memory on all PCs starts at physical address 0.
XX make it a constant. */
phys_first_addr = 0;
@@ -284,15 +284,16 @@ mem_size_init(void)
phys_last_addr = phys_last_kb * 0x400;
#endif /* MACH_HYP */
- printf("AT386 boot: physical memory from 0x%x to 0x%x\n",
+ printf("AT386 boot: physical memory from 0x%lx to 0x%lx\n",
phys_first_addr, phys_last_addr);
- /* Reserve 1/6 of the memory address space for virtual mappings.
+ /* Reserve room for virtual mappings.
* Yes, this loses memory. Blame i386. */
- if (phys_last_addr > ((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 6) * 5) {
- phys_last_addr = ((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 6) * 5;
- printf("Truncating memory size to %dMiB\n", (phys_last_addr - phys_first_addr) / (1024 * 1024));
- /* TODO Xen: free lost memory */
+ max_phys_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS - VM_KERNEL_MAP_SIZE;
+ if (phys_last_addr - phys_first_addr > max_phys_size) {
+ phys_last_addr = phys_first_addr + max_phys_size;
+ printf("Truncating memory size to %luMiB\n", (phys_last_addr - phys_first_addr) / (1024 * 1024));
+ /* TODO Xen: be nice, free lost memory */
}
phys_first_addr = round_page(phys_first_addr);
@@ -318,7 +319,7 @@ i386at_init(void)
/* XXX move to intel/pmap.h */
extern pt_entry_t *kernel_page_dir;
int nb_direct, i;
- vm_offset_t addr;
+ vm_offset_t addr, delta;
/*
* Initialize the PIC prior to any possible call to an spl.
@@ -380,29 +381,34 @@ i386at_init(void)
pmap_bootstrap();
/*
- * Turn paging on.
* We'll have to temporarily install a direct mapping
* between physical memory and low linear memory,
* until we start using our new kernel segment descriptors.
- * Also, set the WP bit so that on 486 or better processors
- * page-level write protection works in kernel mode.
*/
- init_alloc_aligned(0, &addr);
- nb_direct = (addr + NPTES * PAGE_SIZE - 1) / (NPTES * PAGE_SIZE);
+#if INIT_VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
+ delta = INIT_VM_MIN_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS;
+ if ((vm_offset_t)(-delta) < delta)
+ delta = (vm_offset_t)(-delta);
+ nb_direct = delta >> PDESHIFT;
for (i = 0; i < nb_direct; i++)
- kernel_page_dir[lin2pdenum(VM_MIN_KERNEL_ADDRESS) + i] =
+ kernel_page_dir[lin2pdenum(INIT_VM_MIN_KERNEL_ADDRESS) + i] =
kernel_page_dir[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS) + i];
+#endif
+ /* We need BIOS memory mapped at 0xc0000 & co for Linux drivers */
+#ifdef LINUX_DEV
+#if VM_MIN_KERNEL_ADDRESS != 0
+ kernel_page_dir[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] =
+ kernel_page_dir[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS)];
+#endif
+#endif
-#ifdef MACH_XEN
- {
- int i;
- for (i = 0; i < PDPNUM; i++)
- pmap_set_page_readonly_init((void*) kernel_page_dir + i * INTEL_PGBYTES);
+#ifdef MACH_PV_PAGETABLES
+ for (i = 0; i < PDPNUM; i++)
+ pmap_set_page_readonly_init((void*) kernel_page_dir + i * INTEL_PGBYTES);
#if PAE
- pmap_set_page_readonly_init(kernel_pmap->pdpbase);
+ pmap_set_page_readonly_init(kernel_pmap->pdpbase);
#endif /* PAE */
- }
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
#if PAE
set_cr3((unsigned)_kvtophys(kernel_pmap->pdpbase));
#ifndef MACH_HYP
@@ -414,16 +420,19 @@ i386at_init(void)
set_cr3((unsigned)_kvtophys(kernel_page_dir));
#endif /* PAE */
#ifndef MACH_HYP
- /* already set by Hypervisor */
+ /* Turn paging on.
+ * Also set the WP bit so that on 486 or better processors
+ * page-level write protection works in kernel mode.
+ */
set_cr0(get_cr0() | CR0_PG | CR0_WP);
set_cr0(get_cr0() & ~(CR0_CD | CR0_NW));
if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
set_cr4(get_cr4() | CR4_PGE);
#endif /* MACH_HYP */
flush_instr_queue();
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
pmap_clear_bootstrap_pagetable((void *)boot_info.pt_base);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
/* Interrupt stacks are allocated in physical memory,
while kernel stacks are allocated in kernel virtual memory,
@@ -441,6 +450,7 @@ i386at_init(void)
ldt_init();
ktss_init();
+#if INIT_VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
/* Get rid of the temporary direct mapping and flush it out of the TLB. */
for (i = 0 ; i < nb_direct; i++) {
#ifdef MACH_XEN
@@ -451,9 +461,17 @@ i386at_init(void)
#endif /* MACH_PSEUDO_PHYS */
printf("couldn't unmap frame %d\n", i);
#else /* MACH_XEN */
- kernel_page_dir[lin2pdenum(VM_MIN_KERNEL_ADDRESS) + i] = 0;
+ kernel_page_dir[lin2pdenum(INIT_VM_MIN_KERNEL_ADDRESS) + i] = 0;
#endif /* MACH_XEN */
}
+#endif
+ /* Keep BIOS memory mapped */
+#ifdef LINUX_DEV
+#if VM_MIN_KERNEL_ADDRESS != 0
+ kernel_page_dir[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] =
+ kernel_page_dir[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS)];
+#endif
+#endif
/* Not used after boot, better give it back. */
#ifdef MACH_XEN
@@ -485,7 +503,7 @@ void c_boot_entry(vm_offset_t bi)
/* Before we do _anything_ else, print the hello message.
If there are no initialized console devices yet,
it will be stored and printed at the first opportunity. */
- printf(version);
+ printf("%s", version);
printf("\n");
#ifdef MACH_XEN
@@ -513,7 +531,7 @@ void c_boot_entry(vm_offset_t bi)
strtab_size = (vm_offset_t)phystokv(boot_info.syms.a.strsize);
kern_sym_end = kern_sym_start + 4 + symtab_size + strtab_size;
- printf("kernel symbol table at %08x-%08x (%d,%d)\n",
+ printf("kernel symbol table at %08lx-%08lx (%d,%d)\n",
kern_sym_start, kern_sym_end,
symtab_size, strtab_size);
}
@@ -699,9 +717,9 @@ init_alloc_aligned(vm_size_t size, vm_offset_t *addrp)
}
/* Skip our own kernel code, data, and bss. */
- if ((avail_next > (vm_offset_t)start) && (addr < (vm_offset_t)end))
+ if ((phystokv(avail_next) > (vm_offset_t)start) && (phystokv(addr) < (vm_offset_t)end))
{
- avail_next = (vm_offset_t)end;
+ avail_next = _kvtophys(end);
goto retry;
}
@@ -716,9 +734,9 @@ init_alloc_aligned(vm_size_t size, vm_offset_t *addrp)
avail_next = mods_end_pa;
goto retry;
}
- if ((avail_next > kern_sym_start) && (addr < kern_sym_end))
+ if ((phystokv(avail_next) > kern_sym_start) && (phystokv(addr) < kern_sym_end))
{
- avail_next = kern_sym_end;
+ avail_next = _kvtophys(kern_sym_end);
goto retry;
}
if (boot_info.flags & MULTIBOOT_MODS)
diff --git a/i386/i386at/pic_isa.c b/i386/i386at/pic_isa.c
index ef7d84c7..e48fb507 100644
--- a/i386/i386at/pic_isa.c
+++ b/i386/i386at/pic_isa.c
@@ -27,11 +27,11 @@
#include <sys/types.h>
#include <i386/ipl.h>
#include <i386/pic.h>
-
+#include <i386/fpu.h>
+#include <i386at/kd.h>
/* These interrupts are always present */
-extern void intnull(), fpintr(), hardclock(), kdintr();
-extern void prtnull();
+extern void hardclock();
void (*ivect[NINTR])() = {
/* 00 */ hardclock, /* always */
diff --git a/i386/i386at/rtc.c b/i386/i386at/rtc.c
index e0a03dee..67768013 100644
--- a/i386/i386at/rtc.c
+++ b/i386/i386at/rtc.c
@@ -53,7 +53,6 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <i386/pio.h>
#include <i386at/rtc.h>
-static unsigned char rtc[RTC_NREG];
static int first_rtcopen_ever = 1;
void
@@ -139,11 +138,7 @@ readtodc(tp)
int i, days = 0;
spl_t ospl;
-#ifdef MACH_KERNEL
ospl = splclock();
-#else /* MACH_KERNEL */
- ospl = spl5();
-#endif /* MACH_KERNEL */
if (rtcget(&rtclk)) {
splx(ospl);
return(-1);
@@ -170,12 +165,6 @@ readtodc(tp)
days += yeartoday(i);
n += days * 3600 * 24;
-#ifdef MACH_KERNEL
-#else /* MACH_KERNEL */
- n += tz.tz_minuteswest * 60;
- if (tz.tz_dsttime)
- n -= 3600;
-#endif /* MACH_KERNEL */
*tp = n;
@@ -190,24 +179,14 @@ writetodc()
int diff, i, j;
spl_t ospl;
-#ifdef MACH_KERNEL
ospl = splclock();
-#else /* MACH_KERNEL */
- ospl = spl5();
-#endif /* MACH_KERNEL */
if (rtcget(&rtclk)) {
splx(ospl);
return(-1);
}
splx(ospl);
-#ifdef MACH_KERNEL
diff = 0;
-#else /* MACH_KERNEL */
- diff = tz.tz_minuteswest * 60;
- if (tz.tz_dsttime)
- diff -= 3600;
-#endif /* MACH_KERNEL */
n = (time.tv_sec - diff) % (3600 * 24); /* hrs+mins+secs */
rtclk.rtc_sec = dectohexdec(n%60);
n /= 60;
@@ -231,11 +210,7 @@ writetodc()
rtclk.rtc_dom = dectohexdec(++n);
-#ifdef MACH_KERNEL
ospl = splclock();
-#else /* MACH_KERNEL */
- ospl = spl5();
-#endif /* MACH_KERNEL */
rtcput(&rtclk);
splx(ospl);
diff --git a/i386/include/mach/i386/exec/elf.h b/i386/include/mach/i386/exec/elf.h
index 5155b3d5..cfa988d2 100644
--- a/i386/include/mach/i386/exec/elf.h
+++ b/i386/include/mach/i386/exec/elf.h
@@ -23,11 +23,11 @@
#ifndef _MACH_I386_EXEC_ELF_H_
#define _MACH_I386_EXEC_ELF_H_
-typedef unsigned long Elf32_Addr;
+typedef unsigned int Elf32_Addr;
typedef unsigned short Elf32_Half;
-typedef unsigned long Elf32_Off;
-typedef signed long Elf32_Sword;
-typedef unsigned long Elf32_Word;
+typedef unsigned int Elf32_Off;
+typedef signed int Elf32_Sword;
+typedef unsigned int Elf32_Word;
/* Architecture identification parameters for i386. */
#define MY_EI_DATA ELFDATA2LSB
diff --git a/i386/include/mach/i386/vm_param.h b/i386/include/mach/i386/vm_param.h
index 6d7c5f3c..8f708f0f 100644
--- a/i386/include/mach/i386/vm_param.h
+++ b/i386/include/mach/i386/vm_param.h
@@ -52,8 +52,8 @@
* No rounding is used.
*/
-#define i386_btop(x) (((unsigned)(x)) >> I386_PGSHIFT)
-#define i386_ptob(x) (((unsigned)(x)) << I386_PGSHIFT)
+#define i386_btop(x) (((unsigned long)(x)) >> I386_PGSHIFT)
+#define i386_ptob(x) (((unsigned long)(x)) << I386_PGSHIFT)
/*
* Round off or truncate to the nearest page. These will work
@@ -61,9 +61,9 @@
* bytes.)
*/
-#define i386_round_page(x) ((((unsigned)(x)) + I386_PGBYTES - 1) & \
+#define i386_round_page(x) ((((unsigned long)(x)) + I386_PGBYTES - 1) & \
~(I386_PGBYTES-1))
-#define i386_trunc_page(x) (((unsigned)(x)) & ~(I386_PGBYTES-1))
+#define i386_trunc_page(x) (((unsigned long)(x)) & ~(I386_PGBYTES-1))
/* User address spaces are 3GB each,
starting at virtual and linear address 0.
diff --git a/i386/include/mach/i386/vm_types.h b/i386/include/mach/i386/vm_types.h
index d54008ef..1439940b 100644
--- a/i386/include/mach/i386/vm_types.h
+++ b/i386/include/mach/i386/vm_types.h
@@ -73,7 +73,7 @@ typedef unsigned int uint32;
* A vm_offset_t is a type-neutral pointer,
* e.g. an offset into a virtual memory space.
*/
-typedef natural_t vm_offset_t;
+typedef unsigned long vm_offset_t;
typedef vm_offset_t * vm_offset_array_t;
/*
@@ -88,11 +88,11 @@ typedef natural_t vm_size_t;
*/
typedef signed char signed8_t;
typedef signed short signed16_t;
-typedef signed long signed32_t;
+typedef signed int signed32_t;
typedef signed long long signed64_t;
typedef unsigned char unsigned8_t;
typedef unsigned short unsigned16_t;
-typedef unsigned long unsigned32_t;
+typedef unsigned int unsigned32_t;
typedef unsigned long long unsigned64_t;
typedef float float32_t;
typedef double float64_t;
diff --git a/i386/include/mach/sa/stdarg.h b/i386/include/mach/sa/stdarg.h
index ba0f78a1..550fec4f 100644
--- a/i386/include/mach/sa/stdarg.h
+++ b/i386/include/mach/sa/stdarg.h
@@ -39,7 +39,7 @@ typedef __builtin_va_list va_list;
#else
-#define __va_size(type) ((sizeof(type)+3) & ~0x3)
+#define __va_size(type) ((sizeof(type)+sizeof(unsigned long)-1) & ~(sizeof(unsigned long)-1))
#ifndef _VA_LIST_
#define _VA_LIST_
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 0b8ae903..490c1d95 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -63,7 +63,7 @@
#include <kern/debug.h>
#include <kern/printf.h>
#include <kern/thread.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <kern/lock.h>
@@ -113,7 +113,7 @@ pv_entry_t pv_head_table; /* array of entries, one per page */
/*
* pv_list entries are kept on a list that can only be accessed
* with the pmap system locked (at SPLVM, not in the cpus_active set).
- * The list is refilled from the pv_list_zone if it becomes empty.
+ * The list is refilled from the pv_list_cache if it becomes empty.
*/
pv_entry_t pv_free_list; /* free list at SPLVM */
decl_simple_lock_data(, pv_free_list_lock)
@@ -133,7 +133,7 @@ decl_simple_lock_data(, pv_free_list_lock)
simple_unlock(&pv_free_list_lock); \
}
-zone_t pv_list_zone; /* zone of pv_entry structures */
+struct kmem_cache pv_list_cache; /* cache of pv_entry structures */
/*
* Each entry in the pv_head_table is locked by a bit in the
@@ -155,9 +155,6 @@ boolean_t pmap_initialized = FALSE;
vm_offset_t kernel_virtual_start;
vm_offset_t kernel_virtual_end;
-/* XXX stupid fixed limit - get rid */
-vm_size_t morevm = 128 * 1024 * 1024; /* VM space for kernel map */
-
/*
* Index into pv_head table, its lock bits, and the modify/reference
* bits starting at phys_first_addr.
@@ -330,7 +327,7 @@ lock_data_t pmap_system_lock;
#define MAX_TBIS_SIZE 32 /* > this -> TBIA */ /* XXX */
-#ifdef MACH_HYP
+#ifdef MACH_PV_PAGETABLES
#if 1
#define INVALIDATE_TLB(pmap, s, e) hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL)
#else
@@ -342,7 +339,7 @@ lock_data_t pmap_system_lock;
hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL); \
} while(0)
#endif
-#else /* MACH_HYP */
+#else /* MACH_PV_PAGETABLES */
#if 0
/* It is hard to know when a TLB flush becomes less expensive than a bunch of
* invlpgs. But it surely is more expensive than just one invlpg. */
@@ -361,7 +358,7 @@ lock_data_t pmap_system_lock;
flush_tlb(); \
}
#endif
-#endif /* MACH_HYP */
+#endif /* MACH_PV_PAGETABLES */
#if NCPUS > 1
@@ -403,7 +400,7 @@ struct pmap_update_list cpu_update_list[NCPUS];
struct pmap kernel_pmap_store;
pmap_t kernel_pmap;
-struct zone *pmap_zone; /* zone of pmap structures */
+struct kmem_cache pmap_cache; /* cache of pmap structures */
int pmap_debug = 0; /* flag for debugging prints */
@@ -531,10 +528,10 @@ vm_offset_t pmap_map_bd(virt, start, end, prot)
register pt_entry_t template;
register pt_entry_t *pte;
int spl;
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
int n, i = 0;
struct mmu_update update[HYP_BATCH_MMU_UPDATES];
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
template = pa_to_pte(start)
| INTEL_PTE_NCACHE|INTEL_PTE_WTHRU
@@ -549,7 +546,7 @@ vm_offset_t pmap_map_bd(virt, start, end, prot)
pte = pmap_pte(kernel_pmap, virt);
if (pte == PT_ENTRY_NULL)
panic("pmap_map_bd: Invalid kernel address\n");
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
update[i].ptr = kv_to_ma(pte);
update[i].val = pa_to_ma(template);
i++;
@@ -559,20 +556,20 @@ vm_offset_t pmap_map_bd(virt, start, end, prot)
panic("couldn't pmap_map_bd\n");
i = 0;
}
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
WRITE_PTE(pte, template)
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
pte_increment_pa(template);
virt += PAGE_SIZE;
start += PAGE_SIZE;
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (i > HYP_BATCH_MMU_UPDATES)
panic("overflowed array in pmap_map_bd");
hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
if (n != i)
panic("couldn't pmap_map_bd\n");
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
PMAP_READ_UNLOCK(pmap, spl);
return(virt);
}
@@ -620,8 +617,7 @@ void pmap_bootstrap()
* and extends to a stupid arbitrary limit beyond that.
*/
kernel_virtual_start = phystokv(phys_last_addr);
- kernel_virtual_end = phystokv(phys_last_addr) + morevm
- + (phys_last_addr - phys_first_addr);
+ kernel_virtual_end = phystokv(phys_last_addr) + VM_KERNEL_MAP_SIZE;
if (kernel_virtual_end < kernel_virtual_start
|| kernel_virtual_end > VM_MAX_KERNEL_ADDRESS)
@@ -653,7 +649,7 @@ void pmap_bootstrap()
kernel_pmap->dirbase[i] = 0;
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
/* We don't actually deal with the CR3 register content at all */
hyp_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);
/*
@@ -671,25 +667,27 @@ void pmap_bootstrap()
pt_entry_t *l1_map[NSUP_L1];
{
pt_entry_t *base = (pt_entry_t*) boot_info.pt_base;
- int i;
+ vm_offset_t la;
int n_l1map;
+ for (n_l1map = 0, la = VM_MIN_KERNEL_ADDRESS; la >= VM_MIN_KERNEL_ADDRESS; la += NPTES * PAGE_SIZE) {
#ifdef PAE
- pt_entry_t *l2_map = (pt_entry_t*) ptetokv(base[0]);
+ pt_entry_t *l2_map = (pt_entry_t*) ptetokv(base[lin2pdpnum(la)]);
#else /* PAE */
- pt_entry_t *l2_map = base;
+ pt_entry_t *l2_map = base;
#endif /* PAE */
- for (n_l1map = 0, i = lin2pdenum(VM_MIN_KERNEL_ADDRESS); i < NPTES; i++) {
- if (!(l2_map[i] & INTEL_PTE_VALID)) {
+ /* Like lin2pdenum, but works with non-contiguous boot L3 */
+ l2_map += (la >> PDESHIFT) & PDEMASK;
+ if (!(*l2_map & INTEL_PTE_VALID)) {
struct mmu_update update;
int j, n;
l1_map[n_l1map] = (pt_entry_t*) phystokv(pmap_grab_page());
for (j = 0; j < NPTES; j++)
- l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn((i - lin2pdenum(VM_MIN_KERNEL_ADDRESS)) * NPTES + j)) << PAGE_SHIFT) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
+ l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn(lin2pdenum(la - VM_MIN_KERNEL_ADDRESS) * NPTES + j)) << PAGE_SHIFT) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
pmap_set_page_readonly_init(l1_map[n_l1map]);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (l1_map[n_l1map])))
panic("couldn't pin page %p(%p)", l1_map[n_l1map], (vm_offset_t) kv_to_ma (l1_map[n_l1map]));
- update.ptr = kv_to_ma(&l2_map[i]);
+ update.ptr = kv_to_ma(l2_map);
update.val = kv_to_ma(l1_map[n_l1map]) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
hyp_mmu_update(kv_to_la(&update), 1, kv_to_la(&n), DOMID_SELF);
if (n != 1)
@@ -700,7 +698,7 @@ void pmap_bootstrap()
}
}
}
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
/*
* Allocate and set up the kernel page tables.
@@ -720,7 +718,7 @@ void pmap_bootstrap()
* to allocate new kernel page tables later.
* XX fix this
*/
- for (va = phystokv(phys_first_addr); va < kernel_virtual_end; )
+ for (va = phystokv(phys_first_addr); va >= phystokv(phys_first_addr) && va < kernel_virtual_end; )
{
pt_entry_t *pde = kernel_page_dir + lin2pdenum(kvtolin(va));
pt_entry_t *ptable = (pt_entry_t*)phystokv(pmap_grab_page());
@@ -738,24 +736,24 @@ void pmap_bootstrap()
WRITE_PTE(pte, 0);
}
else
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (va == (vm_offset_t) &hyp_shared_info)
{
*pte = boot_info.shared_info | INTEL_PTE_VALID | INTEL_PTE_WRITE;
va += INTEL_PGBYTES;
}
else
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
{
extern char _start[], etext[];
if (((va >= (vm_offset_t) _start)
&& (va + INTEL_PGBYTES <= (vm_offset_t)etext))
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
|| (va >= (vm_offset_t) boot_info.pt_base
&& (va + INTEL_PGBYTES <=
(vm_offset_t) ptable + INTEL_PGBYTES))
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
)
{
WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
@@ -763,7 +761,7 @@ void pmap_bootstrap()
}
else
{
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
/* Keep supplementary L1 pages read-only */
int i;
for (i = 0; i < NSUP_L1; i++)
@@ -773,7 +771,7 @@ void pmap_bootstrap()
break;
}
if (i == NSUP_L1)
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
| INTEL_PTE_VALID | INTEL_PTE_WRITE | global)
@@ -786,11 +784,11 @@ void pmap_bootstrap()
WRITE_PTE(pte, 0);
va += INTEL_PGBYTES;
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readonly_init(ptable);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (ptable)))
panic("couldn't pin page %p(%p)\n", ptable, (vm_offset_t) kv_to_ma (ptable));
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
}
}
@@ -798,7 +796,7 @@ void pmap_bootstrap()
soon after we return from here. */
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
/* These are only required because of Xen security policies */
/* Set back a page read write */
@@ -830,26 +828,27 @@ void pmap_set_page_readonly(void *_vaddr) {
}
/* This needs to be called instead of pmap_set_page_readonly as long as RC3
- * still points to the bootstrap dirbase. */
+ * still points to the bootstrap dirbase, to also fix the bootstrap table. */
void pmap_set_page_readonly_init(void *_vaddr) {
vm_offset_t vaddr = (vm_offset_t) _vaddr;
#if PAE
pt_entry_t *pdpbase = (void*) boot_info.pt_base;
- vm_offset_t dirbase = ptetokv(pdpbase[0]);
+ /* The bootstrap table does not necessarily use contiguous pages for the pde tables */
+ pt_entry_t *dirbase = (void*) ptetokv(pdpbase[lin2pdpnum(vaddr)]);
#else
- vm_offset_t dirbase = boot_info.pt_base;
+ pt_entry_t *dirbase = (void*) boot_info.pt_base;
#endif
- struct pmap linear_pmap = {
- .dirbase = (void*) dirbase,
- };
+ pt_entry_t *pte = &dirbase[lin2pdenum(vaddr) & PTEMASK];
/* Modify our future kernel map (can't use update_va_mapping for this)... */
- if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID)
+ if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID) {
if (!hyp_mmu_update_la (kvtolin(vaddr), pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID))
panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
+ }
/* ... and the bootstrap map. */
- if (*pmap_pde(&linear_pmap, vaddr) & INTEL_PTE_VALID)
+ if (*pte & INTEL_PTE_VALID) {
if (hyp_do_update_va_mapping (vaddr, pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID, UVMF_NONE))
panic("couldn't set MMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
+ }
}
void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
@@ -872,7 +871,7 @@ void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
#endif /* PAE */
for (i = 0; i < NPTES; i++) {
pt_entry_t pde = dir[i];
- unsigned long pfn = mfn_to_pfn(atop(pde));
+ unsigned long pfn = atop(pte_to_pa(pde));
void *pgt = (void*) phystokv(ptoa(pfn));
if (pde & INTEL_PTE_VALID)
hyp_free_page(pfn, pgt);
@@ -890,7 +889,7 @@ void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
#endif /* PAE */
hyp_free_page(atop(_kvtophys(base)), base);
}
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
void pmap_virtual_space(startp, endp)
vm_offset_t *startp;
@@ -941,13 +940,13 @@ void pmap_init()
pmap_phys_attributes = (char *) addr;
/*
- * Create the zone of physical maps,
+ * Create the cache of physical maps,
* and of the physical-to-virtual entries.
*/
s = (vm_size_t) sizeof(struct pmap);
- pmap_zone = zinit(s, 0, 400*s, 4096, 0, "pmap"); /* XXX */
+ kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, NULL, NULL, 0);
s = (vm_size_t) sizeof(struct pv_entry);
- pv_list_zone = zinit(s, 0, 10000*s, 4096, 0, "pv_list"); /* XXX */
+ kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL, NULL, NULL, 0);
#if NCPUS > 1
/*
@@ -1013,7 +1012,7 @@ pmap_page_table_page_alloc()
/*
* We cannot allocate the pmap_object in pmap_init,
- * because it is called before the zone package is up.
+ * because it is called before the cache package is up.
* Allocate it now if it is missing.
*/
if (pmap_object == VM_OBJECT_NULL)
@@ -1052,21 +1051,38 @@ void pmap_map_mfn(void *_addr, unsigned long mfn) {
pt_entry_t *pte, *pdp;
vm_offset_t ptp;
pt_entry_t ma = ((pt_entry_t) mfn) << PAGE_SHIFT;
+
+ /* Add a ptp if none exist yet for this pte */
if ((pte = pmap_pte(kernel_pmap, addr)) == PT_ENTRY_NULL) {
ptp = phystokv(pmap_page_table_page_alloc());
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readonly((void*) ptp);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, pa_to_mfn(ptp)))
panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
+#endif /* MACH_PV_PAGETABLES */
pdp = pmap_pde(kernel_pmap, addr);
+
+#ifdef MACH_PV_PAGETABLES
if (!hyp_mmu_update_pte(kv_to_ma(pdp),
pa_to_pte(kv_to_ma(ptp)) | INTEL_PTE_VALID
| INTEL_PTE_USER
| INTEL_PTE_WRITE))
panic("%s:%d could not set pde %p(%p) to %p(%p)\n",__FILE__,__LINE__,kvtophys((vm_offset_t)pdp),(vm_offset_t) kv_to_ma(pdp), ptp, (vm_offset_t) pa_to_ma(ptp));
+#else /* MACH_PV_PAGETABLES */
+ *pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
+ | INTEL_PTE_USER
+ | INTEL_PTE_WRITE;
+#endif /* MACH_PV_PAGETABLES */
pte = pmap_pte(kernel_pmap, addr);
}
+
+#ifdef MACH_PV_PAGETABLES
if (!hyp_mmu_update_pte(kv_to_ma(pte), ma | INTEL_PTE_VALID | INTEL_PTE_WRITE))
panic("%s:%d could not set pte %p(%p) to %p(%p)\n",__FILE__,__LINE__,pte,(vm_offset_t) kv_to_ma(pte), ma, ma_to_pa(ma));
+#else /* MACH_PV_PAGETABLES */
+ /* Note: in this case, mfn is actually a pfn. */
+ WRITE_PTE(pte, ma | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+#endif /* MACH_PV_PAGETABLES */
}
#endif /* MACH_XEN */
@@ -1117,11 +1133,11 @@ pmap_t pmap_create(size)
}
/*
- * Allocate a pmap struct from the pmap_zone. Then allocate
- * the page descriptor table from the pd_zone.
+ * Allocate a pmap struct from the pmap_cache. Then allocate
+ * the page descriptor table.
*/
- p = (pmap_t) zalloc(pmap_zone);
+ p = (pmap_t) kmem_cache_alloc(&pmap_cache);
if (p == PMAP_NULL)
panic("pmap_create");
@@ -1131,13 +1147,19 @@ pmap_t pmap_create(size)
panic("pmap_create");
memcpy(p->dirbase, kernel_page_dir, PDPNUM * INTEL_PGBYTES);
-#ifdef MACH_XEN
+#ifdef LINUX_DEV
+#if VM_MIN_KERNEL_ADDRESS != 0
+ /* Do not map BIOS in user tasks */
+ p->dirbase[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] = 0;
+#endif
+#endif
+#ifdef MACH_PV_PAGETABLES
{
int i;
for (i = 0; i < PDPNUM; i++)
pmap_set_page_readonly((void*) p->dirbase + i * INTEL_PGBYTES);
}
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
#if PAE
if (kmem_alloc_wired(kernel_map,
@@ -1149,9 +1171,9 @@ pmap_t pmap_create(size)
for (i = 0; i < PDPNUM; i++)
WRITE_PTE(&p->pdpbase[i], pa_to_pte(kvtophys((vm_offset_t) p->dirbase + i * INTEL_PGBYTES)) | INTEL_PTE_VALID);
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readonly(p->pdpbase);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
#endif /* PAE */
p->ref_count = 1;
@@ -1211,32 +1233,32 @@ void pmap_destroy(p)
if (m == VM_PAGE_NULL)
panic("pmap_destroy: pte page not in object");
vm_page_lock_queues();
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, pa_to_mfn(pa)))
panic("pmap_destroy: couldn't unpin page %p(%p)\n", pa, (vm_offset_t) kv_to_ma(pa));
pmap_set_page_readwrite((void*) phystokv(pa));
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
vm_page_free(m);
inuse_ptepages_count--;
vm_page_unlock_queues();
vm_object_unlock(pmap_object);
}
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
{
int i;
for (i = 0; i < PDPNUM; i++)
pmap_set_page_readwrite((void*) p->dirbase + i * INTEL_PGBYTES);
}
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
kmem_free(kernel_map, (vm_offset_t)p->dirbase, PDPNUM * INTEL_PGBYTES);
#if PAE
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readwrite(p->pdpbase);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
kmem_free(kernel_map, (vm_offset_t)p->pdpbase, INTEL_PGBYTES);
#endif /* PAE */
- zfree(pmap_zone, (vm_offset_t) p);
+ kmem_cache_free(&pmap_cache, (vm_offset_t) p);
}
/*
@@ -1279,10 +1301,10 @@ void pmap_remove_range(pmap, va, spte, epte)
int num_removed, num_unwired;
int pai;
vm_offset_t pa;
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
int n, ii = 0;
struct mmu_update update[HYP_BATCH_MMU_UPDATES];
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
#if DEBUG_PTE_PAGE
if (pmap != kernel_pmap)
@@ -1311,7 +1333,7 @@ void pmap_remove_range(pmap, va, spte, epte)
register int i = ptes_per_vm_page;
register pt_entry_t *lpte = cpte;
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
update[ii].ptr = kv_to_ma(lpte);
update[ii].val = 0;
ii++;
@@ -1321,9 +1343,9 @@ void pmap_remove_range(pmap, va, spte, epte)
panic("couldn't pmap_remove_range\n");
ii = 0;
}
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*lpte = 0;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
lpte++;
} while (--i > 0);
continue;
@@ -1344,7 +1366,7 @@ void pmap_remove_range(pmap, va, spte, epte)
do {
pmap_phys_attributes[pai] |=
*lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
update[ii].ptr = kv_to_ma(lpte);
update[ii].val = 0;
ii++;
@@ -1354,9 +1376,9 @@ void pmap_remove_range(pmap, va, spte, epte)
panic("couldn't pmap_remove_range\n");
ii = 0;
}
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*lpte = 0;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
lpte++;
} while (--i > 0);
}
@@ -1402,13 +1424,13 @@ void pmap_remove_range(pmap, va, spte, epte)
}
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (ii > HYP_BATCH_MMU_UPDATES)
panic("overflowed array in pmap_remove_range");
hyp_mmu_update(kvtolin(&update), ii, kvtolin(&n), DOMID_SELF);
if (n != ii)
panic("couldn't pmap_remove_range\n");
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
/*
* Update the counts
@@ -1554,12 +1576,12 @@ void pmap_page_protect(phys, prot)
do {
pmap_phys_attributes[pai] |=
*pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!hyp_mmu_update_pte(kv_to_ma(pte++), 0))
panic("%s:%d could not clear pte %p\n",__FILE__,__LINE__,pte-1);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*pte++ = 0;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
} while (--i > 0);
}
@@ -1589,12 +1611,12 @@ void pmap_page_protect(phys, prot)
register int i = ptes_per_vm_page;
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~INTEL_PTE_WRITE))
- panic("%s:%d could not enable write on pte %p\n",__FILE__,__LINE__,pte);
-#else /* MACH_XEN */
+ panic("%s:%d could not disable write on pte %p\n",__FILE__,__LINE__,pte);
+#else /* MACH_PV_PAGETABLES */
*pte &= ~INTEL_PTE_WRITE;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
pte++;
} while (--i > 0);
@@ -1683,14 +1705,14 @@ void pmap_protect(map, s, e, prot)
spte = &spte[ptenum(s)];
epte = &spte[intel_btop(l-s)];
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
int n, i = 0;
struct mmu_update update[HYP_BATCH_MMU_UPDATES];
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
while (spte < epte) {
if (*spte & INTEL_PTE_VALID) {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
update[i].ptr = kv_to_ma(spte);
update[i].val = *spte & ~INTEL_PTE_WRITE;
i++;
@@ -1700,19 +1722,19 @@ void pmap_protect(map, s, e, prot)
panic("couldn't pmap_protect\n");
i = 0;
}
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*spte &= ~INTEL_PTE_WRITE;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
}
spte++;
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (i > HYP_BATCH_MMU_UPDATES)
panic("overflowed array in pmap_protect");
hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
if (n != i)
panic("couldn't pmap_protect\n");
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
}
s = l;
pde++;
@@ -1751,7 +1773,7 @@ void pmap_enter(pmap, v, pa, prot, wired)
vm_offset_t old_pa;
assert(pa != vm_page_fictitious_addr);
-if (pmap_debug) printf("pmap(%x, %x)\n", v, pa);
+if (pmap_debug) printf("pmap(%lx, %lx)\n", v, pa);
if (pmap == PMAP_NULL)
return;
@@ -1786,7 +1808,7 @@ if (pmap_debug) printf("pmap(%x, %x)\n", v, pa);
/*
* Must allocate a new pvlist entry while we're unlocked;
- * zalloc may cause pageout (which will lock the pmap system).
+ * Allocating may cause pageout (which will lock the pmap system).
* If we determine we need a pvlist entry, we will unlock
* and allocate one. Then we will retry, throughing away
* the allocated entry later (if we no longer need it).
@@ -1849,7 +1871,7 @@ Retry:
/*XX pdp = &pmap->dirbase[pdenum(v) & ~(i-1)];*/
pdp = pmap_pde(pmap, v);
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readonly((void *) ptp);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn(ptp)))
panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
@@ -1858,11 +1880,11 @@ Retry:
| INTEL_PTE_USER
| INTEL_PTE_WRITE))
panic("%s:%d could not set pde %p(%p,%p) to %p(%p,%p) %p\n",__FILE__,__LINE__, pdp, kvtophys((vm_offset_t)pdp), (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)pdp)), ptp, kvtophys(ptp), (vm_offset_t) pa_to_ma(kvtophys(ptp)), (vm_offset_t) pa_to_pte(kv_to_ma(ptp)));
-#else /* MACH_XEN */
- *pdp = pa_to_pte(ptp) | INTEL_PTE_VALID
- | INTEL_PTE_USER
- | INTEL_PTE_WRITE;
-#endif /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
+ *pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
+ | INTEL_PTE_USER
+ | INTEL_PTE_WRITE;
+#endif /* MACH_PV_PAGETABLES */
pdp++;
ptp += INTEL_PGBYTES;
} while (--i > 0);
@@ -1902,12 +1924,12 @@ Retry:
do {
if (*pte & INTEL_PTE_MOD)
template |= INTEL_PTE_MOD;
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!hyp_mmu_update_pte(kv_to_ma(pte), pa_to_ma(template)))
panic("%s:%d could not set pte %p to %p\n",__FILE__,__LINE__,pte,template);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
WRITE_PTE(pte, template)
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
pte++;
pte_increment_pa(template);
} while (--i > 0);
@@ -1970,9 +1992,9 @@ Retry:
PMAP_READ_UNLOCK(pmap, spl);
/*
- * Refill from zone.
+ * Refill from cache.
*/
- pv_e = (pv_entry_t) zalloc(pv_list_zone);
+ pv_e = (pv_entry_t) kmem_cache_alloc(&pv_list_cache);
goto Retry;
}
}
@@ -2012,12 +2034,12 @@ Retry:
template |= INTEL_PTE_WIRED;
i = ptes_per_vm_page;
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!(hyp_mmu_update_pte(kv_to_ma(pte), pa_to_ma(template))))
panic("%s:%d could not set pte %p to %p\n",__FILE__,__LINE__,pte,template);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
WRITE_PTE(pte, template)
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
pte++;
pte_increment_pa(template);
} while (--i > 0);
@@ -2072,12 +2094,12 @@ void pmap_change_wiring(map, v, wired)
map->stats.wired_count--;
i = ptes_per_vm_page;
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!(hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~INTEL_PTE_WIRED)))
panic("%s:%d could not wire down pte %p\n",__FILE__,__LINE__,pte);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*pte &= ~INTEL_PTE_WIRED;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
pte++;
} while (--i > 0);
}
@@ -2208,7 +2230,7 @@ void pmap_collect(p)
register int i = ptes_per_vm_page;
register pt_entry_t *pdep = pdp;
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
unsigned long pte = *pdep;
void *ptable = (void*) ptetokv(pte);
if (!(hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdep++)), 0)))
@@ -2216,9 +2238,9 @@ void pmap_collect(p)
if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, kv_to_mfn(ptable)))
panic("couldn't unpin page %p(%p)\n", ptable, (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)ptable)));
pmap_set_page_readwrite(ptable);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*pdep++ = 0;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
} while (--i > 0);
}
@@ -2435,12 +2457,12 @@ phys_attribute_clear(phys, bits)
{
register int i = ptes_per_vm_page;
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!(hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~bits)))
panic("%s:%d could not clear bits %lx from pte %p\n",__FILE__,__LINE__,bits,pte);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*pte &= ~bits;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
} while (--i > 0);
}
PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
@@ -2790,7 +2812,7 @@ void pmap_update_interrupt()
}
#endif /* NCPUS > 1 */
-#ifdef i386
+#if defined(__i386__)
/* Unmap page 0 to trap NULL references. */
void
pmap_unmap_page_zero ()
@@ -2801,12 +2823,12 @@ pmap_unmap_page_zero ()
if (!pte)
return;
assert (pte);
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!hyp_mmu_update_pte(kv_to_ma(pte), 0))
printf("couldn't unmap page 0\n");
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*pte = 0;
INVALIDATE_TLB(kernel_pmap, 0, PAGE_SIZE);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
}
-#endif /* i386 */
+#endif /* __i386__ */
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index 7ba7d2cb..93293e3d 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -37,7 +37,6 @@
#ifndef __ASSEMBLER__
-#include <kern/zalloc.h>
#include <kern/lock.h>
#include <mach/machine/vm_param.h>
#include <mach/vm_statistics.h>
@@ -49,7 +48,7 @@
* Define the generic in terms of the specific
*/
-#if i386
+#if defined(__i386__)
#define INTEL_PGBYTES I386_PGBYTES
#define INTEL_PGSHIFT I386_PGSHIFT
#define intel_btop(x) i386_btop(x)
@@ -59,7 +58,7 @@
#define trunc_intel_to_vm(x) trunc_i386_to_vm(x)
#define round_intel_to_vm(x) round_i386_to_vm(x)
#define vm_to_intel(x) vm_to_i386(x)
-#endif /* i386 */
+#endif /* __i386__ */
/*
* i386/i486 Page Table Entry
@@ -102,6 +101,13 @@ typedef unsigned int pt_entry_t;
#endif
/*
+ * Convert linear offset to page directory pointer index
+ */
+#if PAE
+#define lin2pdpnum(a) (((a) >> PDPSHIFT) & PDPMASK)
+#endif
+
+/*
* Convert page descriptor index to linear address
*/
#define pdenum2lin(a) ((vm_offset_t)(a) << PDESHIFT)
@@ -126,15 +132,15 @@ typedef unsigned int pt_entry_t;
#define INTEL_PTE_NCACHE 0x00000010
#define INTEL_PTE_REF 0x00000020
#define INTEL_PTE_MOD 0x00000040
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
/* Not supported */
#define INTEL_PTE_GLOBAL 0x00000000
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
#define INTEL_PTE_GLOBAL 0x00000100
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
#define INTEL_PTE_WIRED 0x00000200
#ifdef PAE
-#define INTEL_PTE_PFN 0xfffffffffffff000ULL
+#define INTEL_PTE_PFN 0x00007ffffffff000ULL
#else
#define INTEL_PTE_PFN 0xfffff000
#endif
@@ -172,13 +178,13 @@ typedef struct pmap *pmap_t;
#define PMAP_NULL ((pmap_t) 0)
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
extern void pmap_set_page_readwrite(void *addr);
extern void pmap_set_page_readonly(void *addr);
extern void pmap_set_page_readonly_init(void *addr);
extern void pmap_map_mfn(void *addr, unsigned long mfn);
extern void pmap_clear_bootstrap_pagetable(pt_entry_t *addr);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
#if PAE
#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->pdpbase))
diff --git a/i386/ldscript b/i386/ldscript
index f2b90fa2..ddbbf910 100644
--- a/i386/ldscript
+++ b/i386/ldscript
@@ -13,6 +13,7 @@ SECTIONS
*/
. = _START;
.text :
+ AT (_START_MAP)
{
*(.text.start)
*(.text .stub .text.* .gnu.linkonce.t.*)
@@ -69,7 +70,8 @@ SECTIONS
.rodata1 : { *(.rodata1) }
.eh_frame_hdr : { *(.eh_frame_hdr) }
.eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
- .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
diff --git a/i386/xen/Makefrag.am b/i386/xen/Makefrag.am
index b15b7db1..ecb33ffc 100644
--- a/i386/xen/Makefrag.am
+++ b/i386/xen/Makefrag.am
@@ -28,6 +28,7 @@ libkernel_a_SOURCES += \
if PLATFORM_xen
gnumach_LINKFLAGS += \
- --defsym _START=0x20000000 \
+ --defsym _START=0xC0000000 \
+ --defsym _START_MAP=0xC0000000 \
-T '$(srcdir)'/i386/ldscript
endif
diff --git a/i386/xen/xen.c b/i386/xen/xen.c
index aa3c2cc8..a46ee2c6 100644
--- a/i386/xen/xen.c
+++ b/i386/xen/xen.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,6 +18,7 @@
#include <kern/printf.h>
#include <kern/debug.h>
+#include <kern/mach_clock.h>
#include <mach/machine/eflags.h>
#include <machine/thread.h>
@@ -43,7 +44,6 @@ void hyp_failsafe_c_callback(struct failsafe_callback_regs *regs) {
panic("failsafe");
}
-extern void clock_interrupt();
extern void return_to_iret;
void hypclock_machine_intr(int old_ipl, void *ret_addr, struct i386_interrupt_state *regs, unsigned64_t delta) {
diff --git a/i386/xen/xen_boothdr.S b/i386/xen/xen_boothdr.S
index 1b32efb8..ac6ad25d 100644
--- a/i386/xen/xen_boothdr.S
+++ b/i386/xen/xen_boothdr.S
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006, 2009, 2010, 2011 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2011 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,11 +22,11 @@
.ascii "GUEST_OS=GNU Mach"
.ascii ",GUEST_VERSION=1.3"
.ascii ",XEN_VER=xen-3.0"
- .ascii ",VIRT_BASE=0x20000000"
- .ascii ",ELF_PADDR_OFFSET=0x20000000"
+ .ascii ",VIRT_BASE=0xC0000000"
+ .ascii ",ELF_PADDR_OFFSET=0xC0000000"
.ascii ",HYPERCALL_PAGE=0x2"
#if PAE
- .ascii ",PAE=yes"
+ .ascii ",PAE=yes[extended-cr3]"
#else
.ascii ",PAE=no"
#endif
@@ -36,6 +36,15 @@
#else /* MACH_PSEUDO_PHYS */
.ascii ",FEATURES=!auto_translated_physmap"
#endif
+#ifndef MACH_PV_PAGETABLES
+ .ascii "|!writable_page_tables"
+#endif /* MACH_PV_PAGETABLES */
+#ifndef MACH_PV_DESCRIPTORS
+ .ascii "|!writable_descriptor_tables"
+#endif /* MACH_PV_DESCRIPTORS */
+#ifndef MACH_RING1
+ .ascii "|!supervisor_mode_kernel"
+#endif /* MACH_PV_DESCRIPTORS */
.byte 0
/* Macro taken from linux/include/linux/elfnote.h */
@@ -59,7 +68,7 @@
ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, start)
ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypcalls)
#if PAE
- ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes")
+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes[extended-cr3]")
#else
ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "no")
#endif
@@ -68,8 +77,17 @@
#ifdef MACH_PSEUDO_PHYS
"pae_pgdir_above_4gb"
#else /* MACH_PSEUDO_PHYS */
- "!auto_translated_physmap|"
+ "!auto_translated_physmap"
#endif
+#ifndef MACH_PV_PAGETABLES
+ "|!writable_page_tables"
+#endif /* MACH_PV_PAGETABLES */
+#ifndef MACH_PV_DESCRIPTORS
+ "|!writable_descriptor_tables"
+#endif /* MACH_PV_DESCRIPTORS */
+#ifndef MACH_RING1
+ "|!supervisor_mode_kernel"
+#endif /* MACH_RING1 */
)
#include <mach/machine/asm.h>
diff --git a/i386/xen/xen_locore.S b/i386/xen/xen_locore.S
index 51f823f2..1468ef80 100644
--- a/i386/xen/xen_locore.S
+++ b/i386/xen/xen_locore.S
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/device/device.defs b/include/device/device.defs
index 2bbd5563..d9234e39 100644
--- a/include/device/device.defs
+++ b/include/device/device.defs
@@ -32,10 +32,6 @@
* block and character device interfaces to the kernel.
*/
-#ifdef MACH_KERNEL
-simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
-#endif
-
subsystem
#if KERNEL_SERVER
KernelServer
@@ -99,27 +95,9 @@ routine device_read_inband(
out data : io_buf_ptr_inband_t
);
-/* obsolete */
-routine xxx_device_set_status(
- device : device_t;
- in flavor : dev_flavor_t;
- in status : dev_status_t, IsLong
- );
-
-/* obsolete */
-routine xxx_device_get_status(
- device : device_t;
- in flavor : dev_flavor_t;
- out status : dev_status_t, IsLong
- );
-
-/* obsolete */
-routine xxx_device_set_filter(
- device : device_t;
- in receive_port : mach_port_send_t;
- in priority : int;
- in filter : filter_array_t, IsLong
- );
+skip; /* old xxx_device_set_status */
+skip; /* old xxx_device_get_status */
+skip; /* old xxx_device_set_filter*/
routine device_map(
device : device_t;
diff --git a/include/mach/gnumach.defs b/include/mach/gnumach.defs
new file mode 100644
index 00000000..73313343
--- /dev/null
+++ b/include/mach/gnumach.defs
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Free Software Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+ gnumach 4200;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+type vm_cache_statistics_data_t = struct[11] of integer_t;
+
+/*
+ * Return page cache statistics for the host on which the target task
+ * resides.
+ */
+routine vm_cache_statistics(
+ target_task : vm_task_t;
+ out vm_cache_stats : vm_cache_statistics_data_t);
diff --git a/include/mach/mach.defs b/include/mach/mach.defs
index 4531a220..58510805 100644
--- a/include/mach/mach.defs
+++ b/include/mach/mach.defs
@@ -30,10 +30,6 @@
* Matchmaker definitions file for Mach kernel interface.
*/
-#ifdef MACH_KERNEL
-simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
-#endif /* MACH_KERNEL */
-
subsystem
#if KERNEL_USER
KernelUser
@@ -388,16 +384,8 @@ skip; /* old pager_flush_request */
* boolean). The new routine is backwards compatible at the C
* language interface.
*/
-simpleroutine xxx_memory_object_lock_request(
- memory_control : memory_object_control_t;
- offset : vm_offset_t;
- size : vm_size_t;
- should_clean : boolean_t;
- should_flush : boolean_t;
- lock_value : vm_prot_t;
- reply_to : mach_port_t =
- MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+skip; /* old xxx_memory_object_lock_request */
simpleroutine memory_object_lock_request(
memory_control : memory_object_control_t;
@@ -409,46 +397,11 @@ simpleroutine memory_object_lock_request(
reply_to : mach_port_t =
MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
-/* obsolete */
-routine xxx_task_get_emulation_vector(
- task : task_t;
- out vector_start : int;
- out emulation_vector: xxx_emulation_vector_t, IsLong);
-
-/* obsolete */
-routine xxx_task_set_emulation_vector(
- task : task_t;
- vector_start : int;
- emulation_vector: xxx_emulation_vector_t, IsLong);
-
-/*
- * Returns information about the host on which the
- * target object resides. [This object may be
- * a task, thread, or memory_object_control port.]
- */
-routine xxx_host_info(
- target_task : mach_port_t;
- out info : machine_info_data_t);
-
-/*
- * Returns information about a particular processor on
- * the host on which the target task resides.
- */
-routine xxx_slot_info(
- target_task : task_t;
- slot : int;
- out info : machine_slot_data_t);
-
-/*
- * Performs control operations (currently only
- * turning off or on) on a particular processor on
- * the host on which the target task resides.
- */
-routine xxx_cpu_control(
- target_task : task_t;
- cpu : int;
- running : boolean_t);
-
+skip; /* old xxx_task_get_emulation_vector */
+skip; /* old xxx_task_set_emulation_vector */
+skip; /* old xxx_host_info */
+skip; /* old xxx_slot_info */
+skip; /* old xxx_cpu_control */
skip; /* old thread_statistics */
skip; /* old task_statistics */
skip; /* old netport_init */
@@ -491,11 +444,7 @@ routine task_set_special_port(
which_port : int;
special_port : mach_port_t);
-/* obsolete */
-routine xxx_task_info(
- target_task : task_t;
- flavor : int;
- out task_info_out : task_info_t, IsLong);
+skip; /* old xxx_task_info */
/*
@@ -537,17 +486,8 @@ routine thread_resume(
routine thread_abort(
target_thread : thread_t);
-/* obsolete */
-routine xxx_thread_get_state(
- target_thread : thread_t;
- flavor : int;
- out old_state : thread_state_t, IsLong);
-
-/* obsolete */
-routine xxx_thread_set_state(
- target_thread : thread_t;
- flavor : int;
- new_state : thread_state_t, IsLong);
+skip; /* old xxx_thread_get_state */
+skip; /* old xxx_thread_set_state */
/*
* Returns the current value of the selected special port
@@ -567,11 +507,7 @@ routine thread_set_special_port(
which_port : int;
special_port : mach_port_t);
-/* obsolete */
-routine xxx_thread_info(
- target_thread : thread_t;
- flavor : int;
- out thread_info_out : thread_info_t, IsLong);
+skip; /* old xxx_thread_info */
/*
* Establish a user-level handler for the specified
diff --git a/include/mach/mach_host.defs b/include/mach/mach_host.defs
index 85ee4dc5..2644146f 100644
--- a/include/mach/mach_host.defs
+++ b/include/mach/mach_host.defs
@@ -34,10 +34,6 @@
* control.
*/
-#ifdef MACH_KERNEL
-simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
-#endif
-
subsystem
#if KERNEL_SERVER
KernelServer
@@ -59,19 +55,8 @@ routine host_processors(
host_priv : host_priv_t;
out processor_list : processor_array_t);
-/* obsolete */
-routine yyy_host_info(
- host : host_t;
- flavor : int;
- out host_info_out : host_info_t, IsLong);
-
-
-/* obsolete */
-routine yyy_processor_info(
- processor : processor_t;
- flavor : int;
- out host : host_t;
- out processor_info_out: processor_info_t, IsLong);
+skip; /* old yyy_host_info */
+skip; /* old yyy_processor_info */
/*
* Start processor.
@@ -87,10 +72,7 @@ routine processor_start(
routine processor_exit(
processor : processor_t);
-/* obsolete */
-routine yyy_processor_control(
- processor : processor_t;
- processor_cmd : processor_info_t, IsLong);
+skip; /* old yyy_processor_control */
/*
* Get default processor set for host.
@@ -99,13 +81,7 @@ routine processor_set_default(
host : host_t;
out default_set : processor_set_name_t);
-/*
- * Get rights to default processor set for host.
- * Replaced by host_processor_set_priv.
- */
-routine xxx_processor_set_default_priv(
- host : host_priv_t;
- out default_set : processor_set_t);
+skip; /* old xxx_processor_set_default_priv */
/*
* Create new processor set. Returns real port for manipulations,
@@ -122,12 +98,7 @@ routine processor_set_create(
routine processor_set_destroy(
set : processor_set_t);
-/* obsolete */
-routine yyy_processor_set_info(
- set_name : processor_set_name_t;
- flavor : int;
- out host : host_t;
- out info_out : processor_set_info_t, IsLong);
+skip; /* old yyy_processor_set_info */
/*
* Assign processor to processor set.
diff --git a/include/mach/mach_types.defs b/include/mach/mach_types.defs
index 4e448b81..607d5d92 100644
--- a/include/mach/mach_types.defs
+++ b/include/mach/mach_types.defs
@@ -121,11 +121,11 @@ type vm_statistics_data_t = struct[13] of integer_t;
type vm_machine_attribute_t = int;
type vm_machine_attribute_val_t = int;
-type thread_info_t = array[*:1024] of natural_t;
+type thread_info_t = array[*:1024] of integer_t;
type thread_basic_info_data_t = struct[11] of integer_t;
type thread_sched_info_data_t = struct[7] of integer_t;
-type task_info_t = array[*:1024] of natural_t;
+type task_info_t = array[*:1024] of integer_t;
type task_basic_info_data_t = struct[8] of integer_t;
type task_events_info = struct[7] of natural_t;
type task_thread_times_info_data_t = struct[4] of integer_t;
@@ -174,7 +174,7 @@ type host_priv_t = mach_port_t
#endif /* KERNEL_SERVER */
;
-type host_info_t = array[*:1024] of natural_t;
+type host_info_t = array[*:1024] of integer_t;
type host_basic_info_data_t = struct[5] of integer_t;
type host_sched_info_data_t = struct[2] of integer_t;
type host_load_info_data_t = struct[6] of integer_t;
@@ -189,7 +189,7 @@ type processor_t = mach_port_t
;
type processor_array_t = ^array[] of processor_t;
-type processor_info_t = array[*:1024] of natural_t;
+type processor_info_t = array[*:1024] of integer_t;
type processor_basic_info_data_t = struct[5] of integer_t;
@@ -215,7 +215,7 @@ type processor_set_name_t = mach_port_t
type processor_set_name_array_t = ^array[] of processor_set_name_t;
-type processor_set_info_t = array[*:1024] of natural_t;
+type processor_set_info_t = array[*:1024] of integer_t;
type processor_set_basic_info_data_t = struct[5] of integer_t;
type processor_set_sched_info_data_t = struct[2] of integer_t;
@@ -228,10 +228,6 @@ type time_value_t = struct[2] of integer_t;
type emulation_vector_t = ^array[] of vm_offset_t;
-type xxx_emulation_vector_t = array[*:1024] of vm_offset_t
- ctype: emulation_vector_t;
- /* XXX compatibility */
-
type rpc_signature_info_t = array[*:1024] of int;
#if KERNEL_SERVER
diff --git a/include/mach/mach_types.h b/include/mach/mach_types.h
index f6ceac3b..87684824 100644
--- a/include/mach/mach_types.h
+++ b/include/mach/mach_types.h
@@ -52,6 +52,7 @@
#include <mach/vm_inherit.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
+#include <mach/vm_cache_statistics.h>
#ifdef MACH_KERNEL
#include <kern/task.h> /* for task_array_t */
diff --git a/include/mach/port.h b/include/mach/port.h
index 6dafb2f3..53f60716 100644
--- a/include/mach/port.h
+++ b/include/mach/port.h
@@ -39,7 +39,7 @@
#include <mach/machine/vm_types.h>
-typedef natural_t mach_port_t;
+typedef vm_offset_t mach_port_t;
typedef mach_port_t *mach_port_array_t;
typedef int *rpc_signature_info_t;
diff --git a/include/mach/syscall_sw.h b/include/mach/syscall_sw.h
index af14c8de..89597e99 100644
--- a/include/mach/syscall_sw.h
+++ b/include/mach/syscall_sw.h
@@ -52,6 +52,7 @@ kernel_trap(mach_reply_port,-26,0)
kernel_trap(mach_thread_self,-27,0)
kernel_trap(mach_task_self,-28,0)
kernel_trap(mach_host_self,-29,0)
+kernel_trap(mach_print,-30,1)
kernel_trap(swtch_pri,-59,1)
kernel_trap(swtch,-60,0)
diff --git a/include/mach/vm_cache_statistics.h b/include/mach/vm_cache_statistics.h
new file mode 100644
index 00000000..072976af
--- /dev/null
+++ b/include/mach/vm_cache_statistics.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2012 Free Software Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _MACH_VM_CACHE_STATISTICS_H_
+#define _MACH_VM_CACHE_STATISTICS_H_
+
+#include <mach/machine/vm_types.h>
+
+struct vm_cache_statistics {
+ integer_t cache_object_count; /* # of cached objects */
+ integer_t cache_count; /* # of cached pages */
+ integer_t active_tmp_count; /* # of active temporary pages */
+ integer_t inactive_tmp_count; /* # of inactive temporary pages */
+ integer_t active_perm_count; /* # of active permanent pages */
+ integer_t inactive_perm_count; /* # of inactive permanent pages */
+ integer_t dirty_count; /* # of dirty pages */
+ integer_t laundry_count; /* # of pages being laundered */
+ integer_t writeback_count; /* # of pages being written back */
+ integer_t slab_count; /* # of slab allocator pages */
+ integer_t slab_reclaim_count; /* # of reclaimable slab pages */
+};
+
+typedef struct vm_cache_statistics *vm_cache_statistics_t;
+typedef struct vm_cache_statistics vm_cache_statistics_data_t;
+
+#endif /* _MACH_VM_CACHE_STATISTICS_H_ */
diff --git a/include/mach/xen.h b/include/mach/xen.h
index f1d9e418..6fc626f2 100644
--- a/include/mach/xen.h
+++ b/include/mach/xen.h
@@ -1,6 +1,6 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009, 2011 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,6 +28,7 @@ extern struct start_info boot_info;
extern volatile struct shared_info hyp_shared_info;
+#ifdef MACH_PV_PAGETABLES
/* Memory translations */
/* pa are physical addresses, from 0 to size of memory */
@@ -77,6 +78,11 @@ extern unsigned long *mfn_list;
#define kv_to_mfn(a) pa_to_mfn(_kvtophys(a))
#define kv_to_ma(a) pa_to_ma(_kvtophys(a))
+#else /* MACH_PV_PAGETABLES */
+#define mfn_to_pfn(n) (n)
+#define pfn_to_mfn(n) (n)
+#endif /* MACH_PV_PAGETABLES */
+
#define mfn_to_kv(mfn) phystokv(ptoa(mfn_to_pfn(mfn)))
#include <machine/xen.h>
diff --git a/include/mach_debug/mach_debug.defs b/include/mach_debug/mach_debug.defs
index 2a58dc4c..053c3fe6 100644
--- a/include/mach_debug/mach_debug.defs
+++ b/include/mach_debug/mach_debug.defs
@@ -42,17 +42,7 @@ skip; /* host_ipc_statistics_reset */
skip; /* host_callout_info */
skip; /* host_callout_statistics */
skip; /* host_callout_statistics_reset */
-
-/*
- * Returns information about the memory allocation zones.
- */
-routine host_zone_info(
- host : host_t;
- out names : zone_name_array_t,
- CountInOut, Dealloc;
- out info : zone_info_array_t,
- CountInOut, Dealloc);
-
+skip; /* host_zone_info */
skip; /* host_ipc_bucket_info */
#if !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG
@@ -228,6 +218,14 @@ routine mach_vm_object_pages(
out pages : vm_page_info_array_t,
CountInOut, Dealloc);
+/*
+ * Returns information about the memory allocation caches.
+ */
+routine host_slab_info(
+ host : host_t;
+ out info : cache_info_array_t,
+ CountInOut, Dealloc);
+
#else /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
skip; /* mach_vm_region_info */
skip; /* mach_vm_object_info */
diff --git a/include/mach_debug/mach_debug_types.defs b/include/mach_debug/mach_debug_types.defs
index 9f1976fe..f60125a0 100644
--- a/include/mach_debug/mach_debug_types.defs
+++ b/include/mach_debug/mach_debug_types.defs
@@ -32,11 +32,8 @@
#include <mach/std_types.defs>
-type zone_name_t = struct[80] of char;
-type zone_name_array_t = array[] of zone_name_t;
-
-type zone_info_t = struct[9] of integer_t;
-type zone_info_array_t = array[] of zone_info_t;
+type cache_info_t = struct[19] of integer_t;
+type cache_info_array_t = array[] of cache_info_t;
type hash_info_bucket_t = struct[1] of natural_t;
type hash_info_bucket_array_t = array[] of hash_info_bucket_t;
diff --git a/include/mach_debug/mach_debug_types.h b/include/mach_debug/mach_debug_types.h
index 2ba0cb1f..5d4efcde 100644
--- a/include/mach_debug/mach_debug_types.h
+++ b/include/mach_debug/mach_debug_types.h
@@ -32,7 +32,7 @@
#include <mach_debug/ipc_info.h>
#include <mach_debug/vm_info.h>
-#include <mach_debug/zone_info.h>
+#include <mach_debug/slab_info.h>
#include <mach_debug/hash_info.h>
typedef char symtab_name_t[32];
diff --git a/include/mach_debug/zone_info.h b/include/mach_debug/slab_info.h
index 1b36fe02..37dcb8c4 100644
--- a/include/mach_debug/zone_info.h
+++ b/include/mach_debug/slab_info.h
@@ -24,38 +24,39 @@
* the rights to redistribute these changes.
*/
-#ifndef _MACH_DEBUG_ZONE_INFO_H_
-#define _MACH_DEBUG_ZONE_INFO_H_
+#ifndef _MACH_DEBUG_SLAB_INFO_H_
+#define _MACH_DEBUG_SLAB_INFO_H_
-#include <mach/boolean.h>
-#include <mach/machine/vm_types.h>
+#include <sys/types.h>
/*
* Remember to update the mig type definitions
* in mach_debug_types.defs when adding/removing fields.
*/
-#define ZONE_NAME_MAX_LEN 80
-
-typedef struct zone_name {
- char zn_name[ZONE_NAME_MAX_LEN];
-} zone_name_t;
-
-typedef zone_name_t *zone_name_array_t;
-
-
-typedef struct zone_info {
- integer_t zi_count; /* Number of elements used now */
- vm_size_t zi_cur_size; /* current memory utilization */
- vm_size_t zi_max_size; /* how large can this zone grow */
- vm_size_t zi_elem_size; /* size of an element */
- vm_size_t zi_alloc_size; /* size used for more memory */
-/*boolean_t*/integer_t zi_pageable; /* zone pageable? */
-/*boolean_t*/integer_t zi_sleepable; /* sleep if empty? */
-/*boolean_t*/integer_t zi_exhaustible; /* merely return if empty? */
-/*boolean_t*/integer_t zi_collectable; /* garbage collect elements? */
-} zone_info_t;
-
-typedef zone_info_t *zone_info_array_t;
-
-#endif /* _MACH_DEBUG_ZONE_INFO_H_ */
+#define CACHE_NAME_MAX_LEN 32
+
+#define CACHE_FLAGS_NO_CPU_POOL 0x01
+#define CACHE_FLAGS_SLAB_EXTERNAL 0x02
+#define CACHE_FLAGS_NO_RECLAIM 0x04
+#define CACHE_FLAGS_VERIFY 0x08
+#define CACHE_FLAGS_DIRECT 0x10
+
+typedef struct cache_info {
+ int flags;
+ size_t cpu_pool_size;
+ size_t obj_size;
+ size_t align;
+ size_t buf_size;
+ size_t slab_size;
+ unsigned long bufs_per_slab;
+ unsigned long nr_objs;
+ unsigned long nr_bufs;
+ unsigned long nr_slabs;
+ unsigned long nr_free_slabs;
+ char name[CACHE_NAME_MAX_LEN];
+} cache_info_t;
+
+typedef cache_info_t *cache_info_array_t;
+
+#endif /* _MACH_DEBUG_SLAB_INFO_H_ */
diff --git a/include/string.h b/include/string.h
index 8059bedb..c77d387b 100644
--- a/include/string.h
+++ b/include/string.h
@@ -32,7 +32,7 @@ extern void *memcpy (void *dest, const void *src, size_t n);
extern void *memmove (void *dest, const void *src, size_t n);
-extern int *memcmp (const void *s1, const void *s2, size_t n);
+extern int memcmp (const void *s1, const void *s2, size_t n);
extern void *memset (void *s, int c, size_t n);
diff --git a/include/sys/types.h b/include/sys/types.h
index d79e077c..19e7b242 100644
--- a/include/sys/types.h
+++ b/include/sys/types.h
@@ -30,7 +30,7 @@
#ifndef _SIZE_T
#define _SIZE_T
-typedef natural_t size_t;
+typedef unsigned long size_t;
#endif
#ifndef _SSIZE_T
diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c
index 42e8dd8e..3a062447 100644
--- a/ipc/ipc_entry.c
+++ b/ipc/ipc_entry.c
@@ -41,7 +41,7 @@
#include <mach/port.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <ipc/port.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_entry.h>
@@ -51,7 +51,7 @@
#include <ipc/ipc_table.h>
#include <ipc/ipc_object.h>
-zone_t ipc_tree_entry_zone;
+struct kmem_cache ipc_tree_entry_cache;
/*
* Routine: ipc_entry_tree_collision
diff --git a/ipc/ipc_entry.h b/ipc/ipc_entry.h
index a577cf0c..6afa4f68 100644
--- a/ipc/ipc_entry.h
+++ b/ipc/ipc_entry.h
@@ -41,7 +41,7 @@
#include <mach/mach_types.h>
#include <mach/port.h>
#include <mach/kern_return.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <ipc/port.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_types.h>
@@ -129,10 +129,10 @@ typedef struct ipc_tree_entry {
#define ite_request ite_entry.ie_request
#define ite_next ite_entry.hash.tree
-extern zone_t ipc_tree_entry_zone;
+extern struct kmem_cache ipc_tree_entry_cache;
-#define ite_alloc() ((ipc_tree_entry_t) zalloc(ipc_tree_entry_zone))
-#define ite_free(ite) zfree(ipc_tree_entry_zone, (vm_offset_t) (ite))
+#define ite_alloc() ((ipc_tree_entry_t) kmem_cache_alloc(&ipc_tree_entry_cache))
+#define ite_free(ite) kmem_cache_free(&ipc_tree_entry_cache, (vm_offset_t) (ite))
extern ipc_entry_t
diff --git a/ipc/ipc_hash.c b/ipc/ipc_hash.c
index c2412890..5eec58cb 100644
--- a/ipc/ipc_hash.c
+++ b/ipc/ipc_hash.c
@@ -326,7 +326,7 @@ ipc_hash_global_delete(
*/
#define IH_LOCAL_HASH(obj, size) \
- ((((mach_port_index_t) (obj)) >> 6) % (size))
+ ((((mach_port_index_t) (vm_offset_t) (obj)) >> 6) % (size))
/*
* Routine: ipc_hash_local_lookup
@@ -535,13 +535,9 @@ ipc_hash_init(void)
{
ipc_hash_index_t i;
- /* if not configured, initialize ipc_hash_global_size */
+ /* initialize ipc_hash_global_size */
- if (ipc_hash_global_size == 0) {
- ipc_hash_global_size = ipc_tree_entry_max >> 8;
- if (ipc_hash_global_size < 32)
- ipc_hash_global_size = 32;
- }
+ ipc_hash_global_size = IPC_HASH_GLOBAL_SIZE;
/* make sure it is a power of two */
diff --git a/ipc/ipc_hash.h b/ipc/ipc_hash.h
index 58c56ca8..929ba77d 100644
--- a/ipc/ipc_hash.h
+++ b/ipc/ipc_hash.h
@@ -67,6 +67,8 @@ ipc_hash_delete(ipc_space_t space, ipc_object_t obj,
* and the local primitives, for table entries.
*/
+#define IPC_HASH_GLOBAL_SIZE 256
+
extern boolean_t
ipc_hash_global_lookup(ipc_space_t space, ipc_object_t obj,
mach_port_t *namep, ipc_tree_entry_t *entryp);
diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
index 90e6b783..ca7e7912 100644
--- a/ipc/ipc_init.c
+++ b/ipc/ipc_init.c
@@ -35,8 +35,8 @@
*/
#include <mach/kern_return.h>
-#include <kern/mach_param.h>
#include <kern/ipc_host.h>
+#include <kern/slab.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_entry.h>
@@ -52,14 +52,10 @@
-vm_map_t ipc_kernel_map;
+static struct vm_map ipc_kernel_map_store;
+vm_map_t ipc_kernel_map = &ipc_kernel_map_store;
vm_size_t ipc_kernel_map_size = 8 * 1024 * 1024;
-int ipc_space_max = SPACE_MAX;
-int ipc_tree_entry_max = ITE_MAX;
-int ipc_port_max = PORT_MAX;
-int ipc_pset_max = SET_MAX;
-
/*
* Routine: ipc_bootstrap
* Purpose:
@@ -77,28 +73,17 @@ ipc_bootstrap(void)
ipc_port_timestamp_lock_init();
ipc_port_timestamp_data = 0;
- ipc_space_zone = zinit(sizeof(struct ipc_space), 0,
- ipc_space_max * sizeof(struct ipc_space),
- sizeof(struct ipc_space),
- IPC_ZONE_TYPE, "ipc spaces");
-
- ipc_tree_entry_zone =
- zinit(sizeof(struct ipc_tree_entry), 0,
- ipc_tree_entry_max * sizeof(struct ipc_tree_entry),
- sizeof(struct ipc_tree_entry),
- IPC_ZONE_TYPE, "ipc tree entries");
-
- ipc_object_zones[IOT_PORT] =
- zinit(sizeof(struct ipc_port), 0,
- ipc_port_max * sizeof(struct ipc_port),
- sizeof(struct ipc_port),
- 0, "ipc ports");
-
- ipc_object_zones[IOT_PORT_SET] =
- zinit(sizeof(struct ipc_pset), 0,
- ipc_pset_max * sizeof(struct ipc_pset),
- sizeof(struct ipc_pset),
- IPC_ZONE_TYPE, "ipc port sets");
+ kmem_cache_init(&ipc_space_cache, "ipc_space",
+ sizeof(struct ipc_space), 0, NULL, NULL, NULL, 0);
+
+ kmem_cache_init(&ipc_tree_entry_cache, "ipc_tree_entry",
+ sizeof(struct ipc_tree_entry), 0, NULL, NULL, NULL, 0);
+
+ kmem_cache_init(&ipc_object_caches[IOT_PORT], "ipc_port",
+ sizeof(struct ipc_port), 0, NULL, NULL, NULL, 0);
+
+ kmem_cache_init(&ipc_object_caches[IOT_PORT_SET], "ipc_pset",
+ sizeof(struct ipc_pset), 0, NULL, NULL, NULL, 0);
/* create special spaces */
@@ -127,8 +112,8 @@ ipc_init()
{
vm_offset_t min, max;
- ipc_kernel_map = kmem_suballoc(kernel_map, &min, &max,
- ipc_kernel_map_size, TRUE);
+ kmem_submap(ipc_kernel_map, kernel_map, &min, &max,
+ ipc_kernel_map_size, TRUE);
ipc_host_init();
}
diff --git a/ipc/ipc_init.h b/ipc/ipc_init.h
index b2f1dd4b..8dd64bb5 100644
--- a/ipc/ipc_init.h
+++ b/ipc/ipc_init.h
@@ -37,14 +37,6 @@
#ifndef _IPC_IPC_INIT_H_
#define _IPC_IPC_INIT_H_
-/* all IPC zones should be exhaustible */
-#define IPC_ZONE_TYPE ZONE_EXHAUSTIBLE
-
-extern int ipc_space_max;
-extern int ipc_tree_entry_max;
-extern int ipc_port_max;
-extern int ipc_pset_max;
-
/*
* Exported interfaces
*/
diff --git a/ipc/ipc_kmsg.c b/ipc/ipc_kmsg.c
index a12c9476..c2689a48 100644
--- a/ipc/ipc_kmsg.c
+++ b/ipc/ipc_kmsg.c
@@ -1368,7 +1368,7 @@ ipc_kmsg_copyin_body(kmsg, space, map)
mach_msg_type_number_t number;
boolean_t is_inline, longform, dealloc, is_port;
vm_offset_t data;
- vm_size_t length;
+ unsigned64_t length;
kern_return_t kr;
type = (mach_msg_type_long_t *) saddr;
@@ -1419,7 +1419,7 @@ ipc_kmsg_copyin_body(kmsg, space, map)
/* calculate length of data in bytes, rounding up */
- length = ((number * size) + 7) >> 3;
+ length = (((unsigned64_t) number * size) + 7) >> 3;
if (is_inline) {
vm_size_t amount;
@@ -2382,7 +2382,7 @@ ipc_kmsg_copyout_body(saddr, eaddr, space, map)
mach_msg_type_size_t size;
mach_msg_type_number_t number;
boolean_t is_inline, longform, is_port;
- vm_size_t length;
+ unsigned64_t length;
vm_offset_t addr;
type = (mach_msg_type_long_t *) saddr;
@@ -2413,7 +2413,7 @@ ipc_kmsg_copyout_body(saddr, eaddr, space, map)
/* calculate length of data in bytes, rounding up */
- length = ((number * size) + 7) >> 3;
+ length = (((unsigned64_t) number * size) + 7) >> 3;
is_port = MACH_MSG_TYPE_PORT_ANY(name);
diff --git a/ipc/ipc_marequest.c b/ipc/ipc_marequest.c
index 540382af..06c53eb4 100644
--- a/ipc/ipc_marequest.c
+++ b/ipc/ipc_marequest.c
@@ -37,9 +37,8 @@
#include <mach/message.h>
#include <mach/port.h>
#include <kern/lock.h>
-#include <kern/mach_param.h>
#include <kern/kalloc.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <ipc/port.h>
#include <ipc/ipc_init.h>
#include <ipc/ipc_space.h>
@@ -58,11 +57,10 @@
#endif
-zone_t ipc_marequest_zone;
-int ipc_marequest_max = IMAR_MAX;
+struct kmem_cache ipc_marequest_cache;
-#define imar_alloc() ((ipc_marequest_t) zalloc(ipc_marequest_zone))
-#define imar_free(imar) zfree(ipc_marequest_zone, (vm_offset_t) (imar))
+#define imar_alloc() ((ipc_marequest_t) kmem_cache_alloc(&ipc_marequest_cache))
+#define imar_free(imar) kmem_cache_free(&ipc_marequest_cache, (vm_offset_t) (imar))
typedef unsigned int ipc_marequest_index_t;
@@ -100,13 +98,9 @@ ipc_marequest_init(void)
{
ipc_marequest_index_t i;
- /* if not configured, initialize ipc_marequest_size */
+ /* initialize ipc_marequest_size */
- if (ipc_marequest_size == 0) {
- ipc_marequest_size = ipc_marequest_max >> 8;
- if (ipc_marequest_size < 16)
- ipc_marequest_size = 16;
- }
+ ipc_marequest_size = IPC_MAREQUEST_SIZE;
/* make sure it is a power of two */
@@ -142,11 +136,8 @@ ipc_marequest_init(void)
bucket->imarb_head = IMAR_NULL;
}
- ipc_marequest_zone =
- zinit(sizeof(struct ipc_marequest), 0,
- ipc_marequest_max * sizeof(struct ipc_marequest),
- sizeof(struct ipc_marequest),
- IPC_ZONE_TYPE, "ipc msg-accepted requests");
+ kmem_cache_init(&ipc_marequest_cache, "ipc_marequest",
+ sizeof(struct ipc_marequest), 0, NULL, NULL, NULL, 0);
}
/*
@@ -439,7 +430,7 @@ ipc_marequest_info(maxp, info, count)
info[i].hib_count = bucket_count;
}
- *maxp = ipc_marequest_max;
+ *maxp = (unsigned int)-1;
return ipc_marequest_size;
}
diff --git a/ipc/ipc_marequest.h b/ipc/ipc_marequest.h
index eb3746d2..4f6f7584 100644
--- a/ipc/ipc_marequest.h
+++ b/ipc/ipc_marequest.h
@@ -70,6 +70,7 @@ typedef struct ipc_marequest {
#define IMAR_NULL ((ipc_marequest_t) 0)
+#define IPC_MAREQUEST_SIZE 16
extern void
ipc_marequest_init(void);
diff --git a/ipc/ipc_mqueue.c b/ipc/ipc_mqueue.c
index f3d5d4b0..80a34d3a 100644
--- a/ipc/ipc_mqueue.c
+++ b/ipc/ipc_mqueue.c
@@ -374,6 +374,8 @@ ipc_mqueue_send(kmsg, option, time_out)
}
}
+ current_task()->messages_sent++;
+
return MACH_MSG_SUCCESS;
}
@@ -684,6 +686,8 @@ ipc_mqueue_receive(
ip_unlock(port);
}
+ current_task()->messages_received++;
+
*kmsgp = kmsg;
*seqnop = seqno;
return MACH_MSG_SUCCESS;
diff --git a/ipc/ipc_notify.c b/ipc/ipc_notify.c
index d06346ea..25fa421b 100644
--- a/ipc/ipc_notify.c
+++ b/ipc/ipc_notify.c
@@ -264,7 +264,7 @@ ipc_notify_port_deleted(port, name)
kmsg = ikm_alloc(sizeof *n);
if (kmsg == IKM_NULL) {
- printf("dropped port-deleted (0x%p, 0x%x)\n", port, name);
+ printf("dropped port-deleted (0x%p, 0x%lx)\n", port, name);
ipc_port_release_sonce(port);
return;
}
@@ -298,7 +298,7 @@ ipc_notify_msg_accepted(port, name)
kmsg = ikm_alloc(sizeof *n);
if (kmsg == IKM_NULL) {
- printf("dropped msg-accepted (0x%p, 0x%x)\n", port, name);
+ printf("dropped msg-accepted (0x%p, 0x%lx)\n", port, name);
ipc_port_release_sonce(port);
return;
}
@@ -437,7 +437,7 @@ ipc_notify_dead_name(port, name)
kmsg = ikm_alloc(sizeof *n);
if (kmsg == IKM_NULL) {
- printf("dropped dead-name (0x%p, 0x%x)\n", port, name);
+ printf("dropped dead-name (0x%p, 0x%lx)\n", port, name);
ipc_port_release_sonce(port);
return;
}
diff --git a/ipc/ipc_object.c b/ipc/ipc_object.c
index a7a7ddb2..b8cae8f5 100644
--- a/ipc/ipc_object.c
+++ b/ipc/ipc_object.c
@@ -47,8 +47,14 @@
#include <ipc/ipc_pset.h>
#include <kern/debug.h>
#include <kern/printf.h>
+#include <kern/slab.h>
-zone_t ipc_object_zones[IOT_NUMBER];
+#if MACH_KDB
+#include <ddb/db_output.h>
+#endif /* MACH_KDB */
+
+
+struct kmem_cache ipc_object_caches[IOT_NUMBER];
diff --git a/ipc/ipc_object.h b/ipc/ipc_object.h
index 2bbf8bd7..adf5bca4 100644
--- a/ipc/ipc_object.h
+++ b/ipc/ipc_object.h
@@ -39,7 +39,7 @@
#include <ipc/ipc_types.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
typedef unsigned int ipc_object_refs_t;
typedef unsigned int ipc_object_bits_t;
@@ -57,7 +57,7 @@ typedef struct ipc_object {
#define IO_VALID(io) (((io) != IO_NULL) && ((io) != IO_DEAD))
#define IO_BITS_KOTYPE 0x0000ffff /* used by the object */
-#define IO_BITS_OTYPE 0x7fff0000 /* determines a zone */
+#define IO_BITS_OTYPE 0x7fff0000 /* determines a cache */
#define IO_BITS_ACTIVE 0x80000000U /* is object alive? */
#define io_active(io) ((int)(io)->io_bits < 0) /* hack */
@@ -75,13 +75,13 @@ typedef struct ipc_object {
#define IOT_PORT_SET 1
#define IOT_NUMBER 2 /* number of types used */
-extern zone_t ipc_object_zones[IOT_NUMBER];
+extern struct kmem_cache ipc_object_caches[IOT_NUMBER];
#define io_alloc(otype) \
- ((ipc_object_t) zalloc(ipc_object_zones[(otype)]))
+ ((ipc_object_t) kmem_cache_alloc(&ipc_object_caches[(otype)]))
#define io_free(otype, io) \
- zfree(ipc_object_zones[(otype)], (vm_offset_t) (io))
+ kmem_cache_free(&ipc_object_caches[(otype)], (vm_offset_t) (io))
#define io_lock_init(io) simple_lock_init(&(io)->io_lock_data)
#define io_lock(io) simple_lock(&(io)->io_lock_data)
diff --git a/ipc/ipc_port.c b/ipc/ipc_port.c
index ce0dbeb4..8e41c3ca 100644
--- a/ipc/ipc_port.c
+++ b/ipc/ipc_port.c
@@ -51,6 +51,9 @@
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_notify.h>
+#if MACH_KDB
+#include <ddb/db_output.h>
+#endif /* MACH_KDB */
decl_simple_lock_data(, ipc_port_multiple_lock_data)
diff --git a/ipc/ipc_pset.c b/ipc/ipc_pset.c
index 141cbdb8..e2b3c862 100644
--- a/ipc/ipc_pset.c
+++ b/ipc/ipc_pset.c
@@ -46,6 +46,10 @@
#include <ipc/ipc_right.h>
#include <ipc/ipc_space.h>
+#if MACH_KDB
+#include <ddb/db_output.h>
+#endif /* MACH_KDB */
+
/*
* Routine: ipc_pset_alloc
diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c
index 0f50f15b..ab55e838 100644
--- a/ipc/ipc_space.c
+++ b/ipc/ipc_space.c
@@ -43,7 +43,7 @@
#include <mach/port.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_splay.h>
@@ -55,7 +55,7 @@
-zone_t ipc_space_zone;
+struct kmem_cache ipc_space_cache;
ipc_space_t ipc_space_kernel;
ipc_space_t ipc_space_reply;
diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
index d030bf76..c4683d20 100644
--- a/ipc/ipc_space.h
+++ b/ipc/ipc_space.h
@@ -44,6 +44,7 @@
#include <mach/mach_types.h>
#include <kern/macro_help.h>
#include <kern/lock.h>
+#include <kern/slab.h>
#include <ipc/ipc_splay.h>
#include <ipc/ipc_types.h>
@@ -82,10 +83,10 @@ struct ipc_space {
#define IS_NULL ((ipc_space_t) 0)
-extern zone_t ipc_space_zone;
+extern struct kmem_cache ipc_space_cache;
-#define is_alloc() ((ipc_space_t) zalloc(ipc_space_zone))
-#define is_free(is) zfree(ipc_space_zone, (vm_offset_t) (is))
+#define is_alloc() ((ipc_space_t) kmem_cache_alloc(&ipc_space_cache))
+#define is_free(is) kmem_cache_free(&ipc_space_cache, (vm_offset_t) (is))
extern struct ipc_space *ipc_space_kernel;
extern struct ipc_space *ipc_space_reply;
diff --git a/ipc/ipc_table.c b/ipc/ipc_table.c
index e5723586..cbb6a894 100644
--- a/ipc/ipc_table.c
+++ b/ipc/ipc_table.c
@@ -39,6 +39,7 @@
#include <ipc/ipc_port.h>
#include <ipc/ipc_entry.h>
#include <kern/kalloc.h>
+#include <kern/slab.h>
#include <vm/vm_kern.h>
/*
@@ -50,13 +51,6 @@ void ipc_table_fill(
unsigned int min,
vm_size_t elemsize);
-/*
- * We borrow the kalloc map, rather than creating
- * yet another submap of the kernel map.
- */
-
-extern vm_map_t kalloc_map;
-
ipc_table_size_t ipc_table_entries;
unsigned int ipc_table_entries_size = 512;
@@ -151,7 +145,7 @@ ipc_table_alloc(
if (size < PAGE_SIZE)
table = kalloc(size);
else
- if (kmem_alloc(kalloc_map, &table, size) != KERN_SUCCESS)
+ if (kmem_alloc(kmem_map, &table, size) != KERN_SUCCESS)
table = 0;
return table;
@@ -177,7 +171,7 @@ ipc_table_realloc(
{
vm_offset_t new_table;
- if (kmem_realloc(kalloc_map, old_table, old_size,
+ if (kmem_realloc(kmem_map, old_table, old_size,
&new_table, new_size) != KERN_SUCCESS)
new_table = 0;
@@ -201,5 +195,5 @@ ipc_table_free(
if (size < PAGE_SIZE)
kfree(table, size);
else
- kmem_free(kalloc_map, table, size);
+ kmem_free(kmem_map, table, size);
}
diff --git a/ipc/ipc_thread.h b/ipc/ipc_thread.h
index e8bfe4a8..fbeea46a 100644
--- a/ipc/ipc_thread.h
+++ b/ipc/ipc_thread.h
@@ -46,6 +46,11 @@ typedef thread_t ipc_thread_t;
#define ith_lock(thread) simple_lock(&(thread)->ith_lock_data)
#define ith_unlock(thread) simple_unlock(&(thread)->ith_lock_data)
+/*
+ * Note that this isn't a queue, but rather a stack. This causes
+ * threads that were recently running to be reused earlier, which
+ * helps improve locality of reference.
+ */
typedef struct ipc_thread_queue {
ipc_thread_t ithq_base;
} *ipc_thread_queue_t;
@@ -103,6 +108,7 @@ MACRO_BEGIN \
(thread)->ith_prev = _last; \
_first->ith_prev = (thread); \
_last->ith_next = (thread); \
+ (queue)->ithq_base = (thread); \
} \
MACRO_END
diff --git a/ipc/mach_msg.c b/ipc/mach_msg.c
index 43ae918a..00ab085b 100644
--- a/ipc/mach_msg.c
+++ b/ipc/mach_msg.c
@@ -218,7 +218,7 @@ mach_msg_receive(msg, option, rcv_size, rcv_name, time_out, notify)
if (mr != MACH_MSG_SUCCESS) {
if (mr == MACH_RCV_TOO_LARGE) {
mach_msg_size_t real_size =
- (mach_msg_size_t) (natural_t) kmsg;
+ (mach_msg_size_t) (vm_offset_t) kmsg;
assert(real_size > rcv_size);
@@ -309,7 +309,7 @@ mach_msg_receive_continue(void)
if (mr != MACH_MSG_SUCCESS) {
if (mr == MACH_RCV_TOO_LARGE) {
mach_msg_size_t real_size =
- (mach_msg_size_t) (natural_t) kmsg;
+ (mach_msg_size_t) (vm_offset_t) kmsg;
assert(real_size > rcv_size);
diff --git a/ipc/mach_port.c b/ipc/mach_port.c
index c5688c90..d0310b55 100644
--- a/ipc/mach_port.c
+++ b/ipc/mach_port.c
@@ -571,7 +571,7 @@ mach_port_destroy(
kr = ipc_right_lookup_write(space, name, &entry);
if (kr != KERN_SUCCESS) {
if (name != MACH_PORT_NULL && name != MACH_PORT_DEAD && space == current_space()) {
- printf("task %p destroying an invalid port %u, most probably a bug.\n", current_task(), name);
+ printf("task %p destroying an invalid port %lu, most probably a bug.\n", current_task(), name);
if (mach_port_deallocate_debug)
SoftDebugger("mach_port_deallocate");
}
@@ -615,7 +615,7 @@ mach_port_deallocate(
kr = ipc_right_lookup_write(space, name, &entry);
if (kr != KERN_SUCCESS) {
if (name != MACH_PORT_NULL && name != MACH_PORT_DEAD && space == current_space()) {
- printf("task %p deallocating an invalid port %u, most probably a bug.\n", current_task(), name);
+ printf("task %p deallocating an invalid port %lu, most probably a bug.\n", current_task(), name);
if (mach_port_deallocate_debug)
SoftDebugger("mach_port_deallocate");
}
diff --git a/kern/act.c b/kern/act.c
index d0a03a3a..36fa79c1 100644
--- a/kern/act.c
+++ b/kern/act.c
@@ -30,8 +30,7 @@
#include <mach/kern_return.h>
#include <mach/alert.h>
-#include <kern/mach_param.h> /* XXX INCALL_... */
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
@@ -47,7 +46,7 @@ static void special_handler(ReturnHandler *rh, struct Act *act);
#endif
#ifndef ACT_STATIC_KLUDGE
-static zone_t act_zone;
+static struct kmem_cache act_cache;
#else
static Act *act_freelist;
static Act free_acts[ACT_STATIC_KLUDGE];
@@ -68,11 +67,8 @@ void
global_act_init()
{
#ifndef ACT_STATIC_KLUDGE
- act_zone = zinit(
- sizeof(struct Act), 0,
- ACT_MAX * sizeof(struct Act), /* XXX */
- ACT_CHUNK * sizeof(struct Act),
- 0, "activations");
+ kmem_cache_init(&act_cache, "Act", sizeof(struct Act), 0,
+ NULL, NULL, NULL, 0);
#else
int i;
@@ -104,7 +100,7 @@ kern_return_t act_create(task_t task, vm_offset_t user_stack,
int rc;
#ifndef ACT_STATIC_KLUDGE
- act = (Act*)zalloc(act_zone);
+ act = (Act*)kmem_cache_alloc(&act_cache);
if (act == 0)
return(KERN_RESOURCE_SHORTAGE);
#else
@@ -170,9 +166,9 @@ static void act_free(Act *inc)
/* Drop the task reference. */
task_deallocate(inc->task);
- /* Put the act back on the act zone */
+ /* Put the act back on the act cache */
#ifndef ACT_STATIC_KLUDGE
- zfree(act_zone, (vm_offset_t)inc);
+ kmem_cache_free(&act_cache, (vm_offset_t)inc);
#else
/* XXX ipt_lock(act_freelist); */
inc->ipt_next = act_freelist;
diff --git a/kern/ast.h b/kern/ast.h
index 695eaac0..4c28b1e6 100644
--- a/kern/ast.h
+++ b/kern/ast.h
@@ -68,7 +68,7 @@
#define AST_PER_THREAD (AST_HALT | AST_TERMINATE | MACHINE_AST_PER_THREAD)
-typedef unsigned int ast_t;
+typedef unsigned long ast_t;
extern volatile ast_t need_ast[NCPUS];
diff --git a/kern/boot_script.c b/kern/boot_script.c
index 93491267..b2e9393b 100644
--- a/kern/boot_script.c
+++ b/kern/boot_script.c
@@ -17,7 +17,7 @@ struct sym
int type;
/* Symbol value. */
- int val;
+ long val;
/* For function symbols; type of value returned by function. */
int ret_type;
@@ -44,7 +44,7 @@ struct arg
int type;
/* Argument value. */
- int val;
+ long val;
};
/* List of commands. */
@@ -67,23 +67,23 @@ static int symtab_index = 0;
/* Create a task and suspend it. */
static int
-create_task (struct cmd *cmd, int *val)
+create_task (struct cmd *cmd, long *val)
{
int err = boot_script_task_create (cmd);
- *val = (int) cmd->task;
+ *val = (long) cmd->task;
return err;
}
/* Resume a task. */
static int
-resume_task (struct cmd *cmd, int *val)
+resume_task (struct cmd *cmd, long *val)
{
return boot_script_task_resume (cmd);
}
/* Resume a task when the user hits return. */
static int
-prompt_resume_task (struct cmd *cmd, int *val)
+prompt_resume_task (struct cmd *cmd, long *val)
{
return boot_script_prompt_task_resume (cmd);
}
@@ -91,9 +91,9 @@ prompt_resume_task (struct cmd *cmd, int *val)
/* List of builtin symbols. */
static struct sym builtin_symbols[] =
{
- { "task-create", VAL_FUNC, (int) create_task, VAL_TASK, 0 },
- { "task-resume", VAL_FUNC, (int) resume_task, VAL_NONE, 1 },
- { "prompt-task-resume", VAL_FUNC, (int) prompt_resume_task, VAL_NONE, 1 },
+ { "task-create", VAL_FUNC, (long) create_task, VAL_TASK, 0 },
+ { "task-resume", VAL_FUNC, (long) resume_task, VAL_NONE, 1 },
+ { "prompt-task-resume", VAL_FUNC, (long) prompt_resume_task, VAL_NONE, 1 },
};
#define NUM_BUILTIN (sizeof (builtin_symbols) / sizeof (builtin_symbols[0]))
@@ -294,7 +294,8 @@ boot_script_parse_line (void *hook, char *cmdline)
for (p += 2;;)
{
char c;
- int i, val, type;
+ int i, type;
+ long val;
struct sym *s;
/* Parse symbol name. */
@@ -349,7 +350,7 @@ boot_script_parse_line (void *hook, char *cmdline)
if (! s->run_on_exec)
{
(error
- = ((*((int (*) (struct cmd *, int *)) s->val))
+ = ((*((int (*) (struct cmd *, long *)) s->val))
(cmd, &val)));
if (error)
goto bad;
@@ -371,7 +372,7 @@ boot_script_parse_line (void *hook, char *cmdline)
else if (s->type == VAL_NONE)
{
type = VAL_SYM;
- val = (int) s;
+ val = (long) s;
}
else
{
@@ -658,7 +659,7 @@ boot_script_exec ()
/* Create an entry for the variable NAME with TYPE and value VAL,
in the symbol table. */
int
-boot_script_set_variable (const char *name, int type, int val)
+boot_script_set_variable (const char *name, int type, long val)
{
struct sym *sym = sym_enter (name);
@@ -681,7 +682,7 @@ boot_script_define_function (const char *name, int ret_type,
if (sym)
{
sym->type = VAL_FUNC;
- sym->val = (int) func;
+ sym->val = (long) func;
sym->ret_type = ret_type;
sym->run_on_exec = ret_type == VAL_NONE;
}
diff --git a/kern/boot_script.h b/kern/boot_script.h
index c436ac21..c5ad6732 100644
--- a/kern/boot_script.h
+++ b/kern/boot_script.h
@@ -102,7 +102,7 @@ int boot_script_exec (void);
/* Create an entry in the symbol table for variable NAME,
whose type is TYPE and value is VAL. Returns 0 on success,
non-zero otherwise. */
-int boot_script_set_variable (const char *name, int type, int val);
+int boot_script_set_variable (const char *name, int type, long val);
/* Define the function NAME, which will return type RET_TYPE. */
int boot_script_define_function (const char *name, int ret_type,
diff --git a/kern/bootstrap.c b/kern/bootstrap.c
index 7dd7c140..4a39f26a 100644
--- a/kern/bootstrap.c
+++ b/kern/bootstrap.c
@@ -42,11 +42,13 @@
#include <kern/debug.h>
#include <kern/host.h>
#include <kern/printf.h>
+#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_user.h>
+#include <vm/pmap.h>
#include <device/device_port.h>
#if MACH_KDB
@@ -112,10 +114,10 @@ task_insert_send_right(
void bootstrap_create()
{
int compat;
+ int n = 0;
#ifdef MACH_XEN
struct multiboot_module *bmods = ((struct multiboot_module *)
boot_info.mod_start);
- int n = 0;
if (bmods)
for (n = 0; bmods[n].mod_start; n++) {
bmods[n].mod_start = kvtophys(bmods[n].mod_start + (vm_offset_t) bmods);
@@ -168,19 +170,19 @@ void bootstrap_create()
/* Initialize boot script variables. We leak these send rights. */
losers = boot_script_set_variable
("host-port", VAL_PORT,
- (int)ipc_port_make_send(realhost.host_priv_self));
+ (long)ipc_port_make_send(realhost.host_priv_self));
if (losers)
panic ("cannot set boot-script variable host-port: %s",
boot_script_error_string (losers));
losers = boot_script_set_variable
("device-port", VAL_PORT,
- (int) ipc_port_make_send(master_device_port));
+ (long) ipc_port_make_send(master_device_port));
if (losers)
panic ("cannot set boot-script variable device-port: %s",
boot_script_error_string (losers));
losers = boot_script_set_variable ("kernel-command-line", VAL_STR,
- (int) kernel_cmdline);
+ (long) kernel_cmdline);
if (losers)
panic ("cannot set boot-script variable %s: %s",
"kernel-command-line", boot_script_error_string (losers));
@@ -199,12 +201,12 @@ void bootstrap_create()
get_compat_strings(flag_string, root_string);
losers = boot_script_set_variable ("boot-args", VAL_STR,
- (int) flag_string);
+ (long) flag_string);
if (losers)
panic ("cannot set boot-script variable %s: %s",
"boot-args", boot_script_error_string (losers));
losers = boot_script_set_variable ("root-device", VAL_STR,
- (int) root_string);
+ (long) root_string);
if (losers)
panic ("cannot set boot-script variable %s: %s",
"root-device", boot_script_error_string (losers));
@@ -225,7 +227,7 @@ void bootstrap_create()
char *var = memcpy (alloca (len), *ep, len);
char *val = strchr (var, '=');
*val++ = '\0';
- losers = boot_script_set_variable (var, VAL_STR, (int) val);
+ losers = boot_script_set_variable (var, VAL_STR, (long) val);
if (losers)
panic ("cannot set boot-script variable %s: %s",
var, boot_script_error_string (losers));
@@ -246,7 +248,7 @@ void bootstrap_create()
if (eq == 0)
continue;
*eq++ = '\0';
- losers = boot_script_set_variable (word, VAL_STR, (int) eq);
+ losers = boot_script_set_variable (word, VAL_STR, (long) eq);
if (losers)
panic ("cannot set boot-script variable %s: %s",
word, boot_script_error_string (losers));
@@ -279,8 +281,10 @@ void bootstrap_create()
panic ("ERROR in executing boot script: %s",
boot_script_error_string (losers));
}
- /* XXX at this point, we could free all the memory used
- by the boot modules and the boot loader's descriptors and such. */
+ /* XXX we could free the memory used
+ by the boot loader's descriptors and such. */
+ for (n = 0; n < boot_info.mods_count; n++)
+ vm_page_create(bmods[n].mod_start, bmods[n].mod_end);
}
static void
diff --git a/kern/compat_xxx_defs.h b/kern/compat_xxx_defs.h
deleted file mode 100644
index 1878bb22..00000000
--- a/kern/compat_xxx_defs.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991 Carnegie Mellon University.
- * Copyright (c) 1993,1994 The University of Utah and
- * the Computer Systems Laboratory (CSL).
- * All rights reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
- * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
- * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
- * THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- * Compatibility definitions for the MiG-related changes
- * to various routines.
- *
- * When all user code has been relinked, this file and the xxx_
- * and yyy_ routines MUST be removed!
- */
-
-/* from mach.defs */
-
-#define xxx_task_info task_info
-#ifdef MIGRATING_THREADS
-#define xxx_thread_get_state act_get_state
-#define xxx_thread_set_state act_set_state
-#define xxx_thread_info act_info
-#else
-#define xxx_thread_get_state thread_get_state
-#define xxx_thread_set_state thread_set_state
-#define xxx_thread_info thread_info
-#endif /* MIGRATING_THREADS */
-
-/* from mach_host.defs */
-
-#define yyy_host_info host_info
-#define yyy_processor_info processor_info
-#define yyy_processor_set_info processor_set_info
-#define yyy_processor_control processor_control
-
-/* from device.defs */
-
-#define ds_xxx_device_set_status ds_device_set_status
-#define ds_xxx_device_get_status ds_device_get_status
-#define ds_xxx_device_set_filter ds_device_set_filter
-
-
-
diff --git a/kern/debug.c b/kern/debug.c
index 178d0788..8a04f1db 100644
--- a/kern/debug.c
+++ b/kern/debug.c
@@ -100,7 +100,7 @@ void SoftDebugger(message)
gimmeabreak();
#endif
-#ifdef i386
+#if defined(__i386__)
asm("int3");
#endif
}
@@ -168,7 +168,7 @@ panic(const char *s, ...)
#endif
printf(": ");
va_start(listp, s);
- _doprnt(s, &listp, do_cnputc, 0, 0);
+ _doprnt(s, listp, do_cnputc, 0, 0);
va_end(listp);
printf("\n");
@@ -205,7 +205,7 @@ log(int level, const char *fmt, ...)
level++;
#endif
va_start(listp, fmt);
- _doprnt(fmt, &listp, do_cnputc, 0, 0);
+ _doprnt(fmt, listp, do_cnputc, 0, 0);
va_end(listp);
}
diff --git a/kern/debug.h b/kern/debug.h
index f4e8200d..e429bdd1 100644
--- a/kern/debug.h
+++ b/kern/debug.h
@@ -57,6 +57,8 @@
#endif /* NDEBUG */
+extern void log (int level, const char *fmt, ...);
+
extern void panic_init(void);
extern void panic (const char *s, ...) __attribute__ ((noreturn));
diff --git a/kern/gnumach.srv b/kern/gnumach.srv
new file mode 100644
index 00000000..38bc9090
--- /dev/null
+++ b/kern/gnumach.srv
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#include <mach/gnumach.defs>
diff --git a/kern/ipc_host.c b/kern/ipc_host.c
index ea8f03ad..cd1c11ab 100644
--- a/kern/ipc_host.c
+++ b/kern/ipc_host.c
@@ -199,10 +199,9 @@ ipc_pset_terminate(
}
/*
- * processor_set_default, processor_set_default_priv:
+ * processor_set_default:
*
- * Return ports for manipulating default_processor set. MiG code
- * differentiates between these two routines.
+ * Return ports for manipulating default_processor set.
*/
kern_return_t
processor_set_default(
@@ -217,19 +216,6 @@ processor_set_default(
return KERN_SUCCESS;
}
-kern_return_t
-xxx_processor_set_default_priv(
- host_t host,
- processor_set_t *pset)
-{
- if (host == HOST_NULL)
- return KERN_INVALID_ARGUMENT;
-
- *pset = &default_pset;
- pset_reference(*pset);
- return KERN_SUCCESS;
-}
-
/*
* Routine: convert_port_to_host
* Purpose:
diff --git a/kern/ipc_kobject.c b/kern/ipc_kobject.c
index 3d8775b4..37d4eb99 100644
--- a/kern/ipc_kobject.c
+++ b/kern/ipc_kobject.c
@@ -46,6 +46,7 @@
#include <ipc/ipc_port.h>
#include <ipc/ipc_thread.h>
#include <vm/vm_object.h>
+#include <vm/memory_object_proxy.h>
#include <device/ds_routines.h>
#if MACH_MACHINE_ROUTINES
@@ -150,7 +151,8 @@ ipc_kobject_server(request)
mach_host_server_routine(),
device_server_routine(),
device_pager_server_routine(),
- mach4_server_routine();
+ mach4_server_routine(),
+ gnumach_server_routine();
#if MACH_DEBUG
extern mig_routine_t mach_debug_server_routine();
#endif
@@ -169,20 +171,27 @@ ipc_kobject_server(request)
|| (routine = mach_debug_server_routine(&request->ikm_header)) != 0
#endif /* MACH_DEBUG */
|| (routine = mach4_server_routine(&request->ikm_header)) != 0
+ || (routine = gnumach_server_routine(&request->ikm_header)) != 0
#if MACH_MACHINE_ROUTINES
|| (routine = MACHINE_SERVER_ROUTINE(&request->ikm_header)) != 0
#endif /* MACH_MACHINE_ROUTINES */
) {
(*routine)(&request->ikm_header, &reply->ikm_header);
- }
- else if (!ipc_kobject_notify(&request->ikm_header,&reply->ikm_header)){
+ kernel_task->messages_received++;
+ } else {
+ if (!ipc_kobject_notify(&request->ikm_header,
+ &reply->ikm_header)) {
((mig_reply_header_t *) &reply->ikm_header)->RetCode
= MIG_BAD_ID;
#if MACH_IPC_TEST
printf("ipc_kobject_server: bogus kernel message, id=%d\n",
request->ikm_header.msgh_id);
#endif /* MACH_IPC_TEST */
+ } else {
+ kernel_task->messages_received++;
+ }
}
+ kernel_task->messages_sent++;
}
check_simple_locks();
@@ -318,7 +327,7 @@ ipc_kobject_destroy(
default:
#if MACH_ASSERT
- printf("ipc_kobject_destroy: port 0x%p, kobj 0x%x, type %d\n",
+ printf("ipc_kobject_destroy: port 0x%p, kobj 0x%lx, type %d\n",
port, port->ip_kobject, ip_kotype(port));
#endif /* MACH_ASSERT */
break;
diff --git a/kern/ipc_tt.c b/kern/ipc_tt.c
index de4edc65..6d32e5b0 100644
--- a/kern/ipc_tt.c
+++ b/kern/ipc_tt.c
@@ -36,6 +36,7 @@
#include <mach/thread_special_ports.h>
#include <vm/vm_kern.h>
#include <kern/debug.h>
+#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
diff --git a/kern/kalloc.c b/kern/kalloc.c
deleted file mode 100644
index 8256305b..00000000
--- a/kern/kalloc.c
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
- * Copyright (c) 1993,1994 The University of Utah and
- * the Computer Systems Laboratory (CSL).
- * All rights reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
- * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
- * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
- * THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- * File: kern/kalloc.c
- * Author: Avadis Tevanian, Jr.
- * Date: 1985
- *
- * General kernel memory allocator. This allocator is designed
- * to be used by the kernel to manage dynamic memory fast.
- */
-
-#include <mach/machine/vm_types.h>
-#include <mach/vm_param.h>
-
-#include <kern/debug.h>
-#include <kern/zalloc.h>
-#include <kern/kalloc.h>
-#include <vm/vm_kern.h>
-#include <vm/vm_object.h>
-#include <vm/vm_map.h>
-
-
-
-vm_map_t kalloc_map;
-vm_size_t kalloc_map_size = 64 * 1024 * 1024;
-vm_size_t kalloc_max;
-
-/*
- * All allocations of size less than kalloc_max are rounded to the
- * next highest power of 2. This allocator is built on top of
- * the zone allocator. A zone is created for each potential size
- * that we are willing to get in small blocks.
- *
- * We assume that kalloc_max is not greater than 64K;
- * thus 16 is a safe array size for k_zone and k_zone_name.
- */
-
-int first_k_zone = -1;
-struct zone *k_zone[16];
-static char *k_zone_name[16] = {
- "kalloc.1", "kalloc.2",
- "kalloc.4", "kalloc.8",
- "kalloc.16", "kalloc.32",
- "kalloc.64", "kalloc.128",
- "kalloc.256", "kalloc.512",
- "kalloc.1024", "kalloc.2048",
- "kalloc.4096", "kalloc.8192",
- "kalloc.16384", "kalloc.32768"
-};
-
-/*
- * Max number of elements per zone. zinit rounds things up correctly
- * Doing things this way permits each zone to have a different maximum size
- * based on need, rather than just guessing; it also
- * means its patchable in case you're wrong!
- */
-unsigned long k_zone_max[16] = {
- 1024, /* 1 Byte */
- 1024, /* 2 Byte */
- 1024, /* 4 Byte */
- 1024, /* 8 Byte */
- 1024, /* 16 Byte */
- 4096, /* 32 Byte */
- 4096, /* 64 Byte */
- 4096, /* 128 Byte */
- 4096, /* 256 Byte */
- 1024, /* 512 Byte */
- 1024, /* 1024 Byte */
- 1024, /* 2048 Byte */
- 1024, /* 4096 Byte */
- 4096, /* 8192 Byte */
- 64, /* 16384 Byte */
- 64, /* 32768 Byte */
-};
-
-/*
- * Initialize the memory allocator. This should be called only
- * once on a system wide basis (i.e. first processor to get here
- * does the initialization).
- *
- * This initializes all of the zones.
- */
-
-#ifndef NDEBUG
-static int kalloc_init_called;
-#endif
-
-void kalloc_init()
-{
- vm_offset_t min, max;
- vm_size_t size;
- register int i;
-
- assert (! kalloc_init_called);
-
- kalloc_map = kmem_suballoc(kernel_map, &min, &max,
- kalloc_map_size, FALSE);
-
- /*
- * Ensure that zones up to size 8192 bytes exist.
- * This is desirable because messages are allocated
- * with kalloc, and messages up through size 8192 are common.
- */
-
- if (PAGE_SIZE < 16*1024)
- kalloc_max = 16*1024;
- else
- kalloc_max = PAGE_SIZE;
-
- /*
- * Allocate a zone for each size we are going to handle.
- * We specify non-paged memory.
- */
- for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
- if (size < MINSIZE) {
- k_zone[i] = 0;
- continue;
- }
- if (size == MINSIZE) {
- first_k_zone = i;
- }
- k_zone[i] = zinit(size, 0, k_zone_max[i] * size, size,
- size >= PAGE_SIZE ? ZONE_COLLECTABLE : 0,
- k_zone_name[i]);
- }
-
-#ifndef NDEBUG
- kalloc_init_called = 1;
-#endif
-}
-
-vm_offset_t kalloc(size)
- vm_size_t size;
-{
- register int zindex;
- register vm_size_t allocsize;
- vm_offset_t addr;
-
- /* compute the size of the block that we will actually allocate */
-
- assert (kalloc_init_called);
-
- allocsize = size;
- if (size < kalloc_max) {
- allocsize = MINSIZE;
- zindex = first_k_zone;
- while (allocsize < size) {
- allocsize <<= 1;
- zindex++;
- }
- }
-
- /*
- * If our size is still small enough, check the queue for that size
- * and allocate.
- */
-
- if (allocsize < kalloc_max) {
- addr = zalloc(k_zone[zindex]);
- } else {
- if (kmem_alloc_wired(kalloc_map, &addr, allocsize)
- != KERN_SUCCESS)
- addr = 0;
- }
- return(addr);
-}
-
-vm_offset_t kget(size)
- vm_size_t size;
-{
- register int zindex;
- register vm_size_t allocsize;
- vm_offset_t addr;
-
- assert (kalloc_init_called);
-
- /* compute the size of the block that we will actually allocate */
-
- allocsize = size;
- if (size < kalloc_max) {
- allocsize = MINSIZE;
- zindex = first_k_zone;
- while (allocsize < size) {
- allocsize <<= 1;
- zindex++;
- }
- }
-
- /*
- * If our size is still small enough, check the queue for that size
- * and allocate.
- */
-
- if (allocsize < kalloc_max) {
- addr = zget(k_zone[zindex]);
- } else {
- /* This will never work, so we might as well panic */
- panic("kget");
- }
- return(addr);
-}
-
-void
-kfree(data, size)
- vm_offset_t data;
- vm_size_t size;
-{
- register int zindex;
- register vm_size_t freesize;
-
- assert (kalloc_init_called);
-
- freesize = size;
- if (size < kalloc_max) {
- freesize = MINSIZE;
- zindex = first_k_zone;
- while (freesize < size) {
- freesize <<= 1;
- zindex++;
- }
- }
-
- if (freesize < kalloc_max) {
- zfree(k_zone[zindex], data);
- } else {
- kmem_free(kalloc_map, data, freesize);
- }
-}
diff --git a/kern/kalloc.h b/kern/kalloc.h
index a80f6dbd..004e3a6b 100644
--- a/kern/kalloc.h
+++ b/kern/kalloc.h
@@ -28,11 +28,9 @@
#define _KERN_KALLOC_H_
#include <mach/machine/vm_types.h>
-
-#define MINSIZE 16
+#include <vm/vm_types.h>
extern vm_offset_t kalloc (vm_size_t size);
-extern vm_offset_t kget (vm_size_t size);
extern void kfree (vm_offset_t data, vm_size_t size);
extern void kalloc_init (void);
diff --git a/kern/list.h b/kern/list.h
new file mode 100644
index 00000000..03414718
--- /dev/null
+++ b/kern/list.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2009, 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Simple doubly-linked list.
+ */
+
+#ifndef _KERN_LIST_H
+#define _KERN_LIST_H
+
+#include <stddef.h>
+#include <sys/types.h>
+
+#define structof(ptr, type, member) \
+ ((type *)((char *)ptr - offsetof(type, member)))
+
+/*
+ * Structure used as both head and node.
+ *
+ * This implementation relies on using the same type for both heads and nodes.
+ *
+ * It is recommended to encode the use of struct list variables in their names,
+ * e.g. struct list free_list or struct list free_objects is a good hint for a
+ * list of free objects. A declaration like struct list free_node clearly
+ * indicates it is used as part of a node in the free list.
+ */
+struct list {
+ struct list *prev;
+ struct list *next;
+};
+
+/*
+ * Static list initializer.
+ */
+#define LIST_INITIALIZER(list) { &(list), &(list) }
+
+/*
+ * Initialize a list.
+ */
+static inline void list_init(struct list *list)
+{
+ list->prev = list;
+ list->next = list;
+}
+
+/*
+ * Initialize a list node.
+ *
+ * An entry is in no list when its node members point to NULL.
+ */
+static inline void list_node_init(struct list *node)
+{
+ node->prev = NULL;
+ node->next = NULL;
+}
+
+/*
+ * Return true if node is in no list.
+ */
+static inline int list_node_unlinked(const struct list *node)
+{
+ return node->prev == NULL;
+}
+
+/*
+ * Macro that evaluates to the address of the structure containing the
+ * given node based on the given type and member.
+ */
+#define list_entry(node, type, member) structof(node, type, member)
+
+/*
+ * Return the first node of a list.
+ */
+static inline struct list * list_first(const struct list *list)
+{
+ return list->next;
+}
+
+/*
+ * Return the last node of a list.
+ */
+static inline struct list * list_last(const struct list *list)
+{
+ return list->prev;
+}
+
+/*
+ * Return the node next to the given node.
+ */
+static inline struct list * list_next(const struct list *node)
+{
+ return node->next;
+}
+
+/*
+ * Return the node previous to the given node.
+ */
+static inline struct list * list_prev(const struct list *node)
+{
+ return node->prev;
+}
+
+/*
+ * Get the first entry of a list.
+ */
+#define list_first_entry(list, type, member) \
+ list_entry(list_first(list), type, member)
+
+/*
+ * Get the last entry of a list.
+ */
+#define list_last_entry(list, type, member) \
+ list_entry(list_last(list), type, member)
+
+/*
+ * Return true if node is after the last or before the first node of the list.
+ */
+static inline int list_end(const struct list *list, const struct list *node)
+{
+ return list == node;
+}
+
+/*
+ * Return true if list is empty.
+ */
+static inline int list_empty(const struct list *list)
+{
+ return list == list->next;
+}
+
+/*
+ * Return true if list contains exactly one node.
+ */
+static inline int list_singular(const struct list *list)
+{
+ return (list != list->next) && (list->next == list->prev);
+}
+
+/*
+ * Split list2 by moving its nodes up to (but not including) the given
+ * node into list1 (which can be in a stale state).
+ *
+ * If list2 is empty, or node is list2 or list2->next, nothing is done.
+ */
+static inline void list_split(struct list *list1, struct list *list2,
+ struct list *node)
+{
+ if (list_empty(list2) || (list2->next == node) || list_end(list2, node))
+ return;
+
+ list1->next = list2->next;
+ list1->next->prev = list1;
+
+ list1->prev = node->prev;
+ node->prev->next = list1;
+
+ list2->next = node;
+ node->prev = list2;
+}
+
+/*
+ * Append the nodes of list2 at the end of list1.
+ *
+ * After completion, list2 is stale.
+ */
+static inline void list_concat(struct list *list1, const struct list *list2)
+{
+ struct list *last1, *first2, *last2;
+
+ if (list_empty(list2))
+ return;
+
+ last1 = list1->prev;
+ first2 = list2->next;
+ last2 = list2->prev;
+
+ last1->next = first2;
+ first2->prev = last1;
+
+ last2->next = list1;
+ list1->prev = last2;
+}
+
+/*
+ * Set the new head of a list.
+ *
+ * This function is an optimized version of :
+ * list_init(&new_list);
+ * list_concat(&new_list, &old_list);
+ *
+ * After completion, old_head is stale.
+ */
+static inline void list_set_head(struct list *new_head,
+ const struct list *old_head)
+{
+ if (list_empty(old_head)) {
+ list_init(new_head);
+ return;
+ }
+
+ *new_head = *old_head;
+ new_head->next->prev = new_head;
+ new_head->prev->next = new_head;
+}
+
+/*
+ * Add a node between two nodes.
+ */
+static inline void list_add(struct list *prev, struct list *next,
+ struct list *node)
+{
+ next->prev = node;
+ node->next = next;
+
+ prev->next = node;
+ node->prev = prev;
+}
+
+/*
+ * Insert a node at the head of a list.
+ */
+static inline void list_insert(struct list *list, struct list *node)
+{
+ list_add(list, list->next, node);
+}
+
+/*
+ * Insert a node at the tail of a list.
+ */
+static inline void list_insert_tail(struct list *list, struct list *node)
+{
+ list_add(list->prev, list, node);
+}
+
+/*
+ * Insert a node before another node.
+ */
+static inline void list_insert_before(struct list *next, struct list *node)
+{
+ list_add(next->prev, next, node);
+}
+
+/*
+ * Insert a node after another node.
+ */
+static inline void list_insert_after(struct list *prev, struct list *node)
+{
+ list_add(prev, prev->next, node);
+}
+
+/*
+ * Remove a node from a list.
+ *
+ * After completion, the node is stale.
+ */
+static inline void list_remove(struct list *node)
+{
+ node->prev->next = node->next;
+ node->next->prev = node->prev;
+}
+
+/*
+ * Forge a loop to process all nodes of a list.
+ *
+ * The node must not be altered during the loop.
+ */
+#define list_for_each(list, node) \
+for (node = list_first(list); \
+ !list_end(list, node); \
+ node = list_next(node))
+
+/*
+ * Forge a loop to process all nodes of a list.
+ */
+#define list_for_each_safe(list, node, tmp) \
+for (node = list_first(list), tmp = list_next(node); \
+ !list_end(list, node); \
+ node = tmp, tmp = list_next(node))
+
+/*
+ * Version of list_for_each() that processes nodes backward.
+ */
+#define list_for_each_reverse(list, node) \
+for (node = list_last(list); \
+ !list_end(list, node); \
+ node = list_prev(node))
+
+/*
+ * Version of list_for_each_safe() that processes nodes backward.
+ */
+#define list_for_each_reverse_safe(list, node, tmp) \
+for (node = list_last(list), tmp = list_prev(node); \
+ !list_end(list, node); \
+ node = tmp, tmp = list_prev(node))
+
+/*
+ * Forge a loop to process all entries of a list.
+ *
+ * The entry node must not be altered during the loop.
+ */
+#define list_for_each_entry(list, entry, member) \
+for (entry = list_entry(list_first(list), typeof(*entry), member); \
+ !list_end(list, &entry->member); \
+ entry = list_entry(list_next(&entry->member), typeof(*entry), \
+ member))
+
+/*
+ * Forge a loop to process all entries of a list.
+ */
+#define list_for_each_entry_safe(list, entry, tmp, member) \
+for (entry = list_entry(list_first(list), typeof(*entry), member), \
+ tmp = list_entry(list_next(&entry->member), typeof(*entry), \
+ member); \
+ !list_end(list, &entry->member); \
+ entry = tmp, tmp = list_entry(list_next(&entry->member), \
+ typeof(*entry), member))
+
+/*
+ * Version of list_for_each_entry() that processes entries backward.
+ */
+#define list_for_each_entry_reverse(list, entry, member) \
+for (entry = list_entry(list_last(list), typeof(*entry), member); \
+ !list_end(list, &entry->member); \
+ entry = list_entry(list_prev(&entry->member), typeof(*entry), \
+ member))
+
+/*
+ * Version of list_for_each_entry_safe() that processes entries backward.
+ */
+#define list_for_each_entry_reverse_safe(list, entry, tmp, member) \
+for (entry = list_entry(list_last(list), typeof(*entry), member), \
+ tmp = list_entry(list_prev(&entry->member), typeof(*entry), \
+ member); \
+ !list_end(list, &entry->member); \
+ entry = tmp, tmp = list_entry(list_prev(&entry->member), \
+ typeof(*entry), member))
+
+#endif /* _KERN_LIST_H */
diff --git a/kern/lock.c b/kern/lock.c
index 0c61227a..44d4448e 100644
--- a/kern/lock.c
+++ b/kern/lock.c
@@ -162,8 +162,8 @@ void simple_lock(
info = &simple_locks_info[simple_locks_taken++];
info->l = l;
/* XXX we want our return address, if possible */
-#ifdef i386
- info->ra = *((unsigned int *)&l - 1);
+#if defined(__i386__)
+ info->ra = *((unsigned long *)&l - 1);
#endif /* i386 */
}
@@ -180,8 +180,8 @@ boolean_t simple_lock_try(
info = &simple_locks_info[simple_locks_taken++];
info->l = l;
/* XXX we want our return address, if possible */
-#ifdef i386
- info->ra = *((unsigned int *)&l - 1);
+#if defined(__i386__)
+ info->ra = *((unsigned long *)&l - 1);
#endif /* i386 */
return TRUE;
@@ -622,7 +622,7 @@ void db_show_all_slocks(void)
info = &simple_locks_info[i];
db_printf("%d: ", i);
db_printsym(info->l, DB_STGY_ANY);
-#if i386
+#if defined(__i386__)
db_printf(" locked by ");
db_printsym(info->ra, DB_STGY_PROC);
#endif
diff --git a/kern/mach.srv b/kern/mach.srv
index 3ed92593..b1cec606 100644
--- a/kern/mach.srv
+++ b/kern/mach.srv
@@ -37,6 +37,4 @@
#define thread_get_special_port act_get_special_port
#endif /* MIGRATING_THREADS */
-simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
-
#include <mach/mach.defs>
diff --git a/kern/mach_clock.c b/kern/mach_clock.c
index 04a31153..edf87f07 100644
--- a/kern/mach_clock.c
+++ b/kern/mach_clock.c
@@ -47,7 +47,6 @@
#include <kern/host.h>
#include <kern/lock.h>
#include <kern/mach_clock.h>
-#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
@@ -91,7 +90,7 @@ int bigadj = 1000000; /* adjust 10*tickadj if adjustment
* } while (secs != mtime->check_seconds);
* to read the time correctly. (On a multiprocessor this assumes
* that processors see each other's writes in the correct order.
- * We may have to insert fence operations.)
+ * We have to insert write fence operations.) FIXME
*/
mapped_time_value_t *mtime = 0;
@@ -100,7 +99,9 @@ mapped_time_value_t *mtime = 0;
MACRO_BEGIN \
if (mtime != 0) { \
mtime->check_seconds = (time)->seconds; \
+ asm volatile("":::"memory"); \
mtime->microseconds = (time)->microseconds; \
+ asm volatile("":::"memory"); \
mtime->seconds = (time)->seconds; \
} \
MACRO_END
@@ -512,7 +513,7 @@ int timeclose()
/*
* Compatibility for device drivers.
* New code should use set_timeout/reset_timeout and private timers.
- * These code can't use a zone to allocate timers, because
+ * These code can't use a cache to allocate timers, because
* it can be called from interrupt handlers.
*/
diff --git a/kern/mach_clock.h b/kern/mach_clock.h
index 2009c709..4e4e8ff1 100644
--- a/kern/mach_clock.h
+++ b/kern/mach_clock.h
@@ -37,10 +37,12 @@ extern int hz; /* number of ticks per second */
extern int tick; /* number of usec per tick */
+typedef void timer_func_t(void *);
+
/* Time-out element. */
struct timer_elt {
queue_chain_t chain; /* chain in order of expiration */
- void (*fcn)(); /* function to call */
+ timer_func_t *fcn; /* function to call */
void * param; /* with this parameter */
unsigned long ticks; /* expiration time, in ticks */
int set; /* unset | set | allocated */
@@ -99,7 +101,7 @@ extern kern_return_t host_adjust_time(
extern void mapable_time_init (void);
/* For public timer elements. */
-extern void timeout(void (*fcn)(void *), void *param, int interval);
-extern boolean_t untimeout(void (*fcn)(void *), void *param);
+extern void timeout(timer_func_t *fcn, void *param, int interval);
+extern boolean_t untimeout(timer_func_t *fcn, void *param);
#endif /* _KERN_MACH_CLOCK_H_ */
diff --git a/kern/mach_factor.c b/kern/mach_factor.c
index 6fbda83d..558c4a06 100644
--- a/kern/mach_factor.c
+++ b/kern/mach_factor.c
@@ -36,10 +36,8 @@
#include <kern/mach_clock.h>
#include <kern/sched.h>
#include <kern/processor.h>
-#if MACH_KERNEL
#include <mach/kern_return.h>
#include <mach/port.h>
-#endif /* MACH_KERNEL */
#include "mach_factor.h"
diff --git a/kern/mach_host.srv b/kern/mach_host.srv
index 30d78db2..a18ab1cf 100644
--- a/kern/mach_host.srv
+++ b/kern/mach_host.srv
@@ -23,8 +23,6 @@
#define KERNEL_SERVER 1
-simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
-
#ifdef MIGRATING_THREADS
#define thread_assign act_thread_assign
#define thread_assign_default act_thread_assign_default
diff --git a/kern/mach_param.h b/kern/mach_param.h
deleted file mode 100644
index 10376d81..00000000
--- a/kern/mach_param.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
- * Copyright (c) 1993,1994 The University of Utah and
- * the Computer Systems Laboratory (CSL).
- * All rights reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
- * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
- * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
- * THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- * File: kern/mach_param.h
- * Author: Avadis Tevanian, Jr., Michael Wayne Young
- * Date: 1986
- *
- * Mach system sizing parameters
- *
- */
-
-#ifndef _KERN_MACH_PARAM_H_
-#define _KERN_MACH_PARAM_H_
-
-#define THREAD_MAX 1024 /* Max number of threads */
-#define THREAD_CHUNK 64 /* Allocation chunk */
-
-#define TASK_MAX 1024 /* Max number of tasks */
-#define TASK_CHUNK 64 /* Allocation chunk */
-
-#define ACT_MAX 1024 /* Max number of acts */
-#define ACT_CHUNK 64 /* Allocation chunk */
-
-#define ACTPOOL_MAX 1024
-#define ACTPOOL_CHUNK 64
-
-#define PORT_MAX ((TASK_MAX * 3 + THREAD_MAX) /* kernel */ \
- + (THREAD_MAX * 2) /* user */ \
- + 40000) /* slop for objects */
- /* Number of ports, system-wide */
-
-#define SET_MAX (TASK_MAX + THREAD_MAX + 200)
- /* Max number of port sets */
-
-#define ITE_MAX (1 << 16) /* Max number of splay tree entries */
-
-#define SPACE_MAX (TASK_MAX + 5) /* Max number of IPC spaces */
-
-#define IMAR_MAX (1 << 10) /* Max number of msg-accepted reqs */
-
-#endif /* _KERN_MACH_PARAM_H_ */
diff --git a/kern/machine.c b/kern/machine.c
index bcf394c3..c2a19b99 100644
--- a/kern/machine.c
+++ b/kern/machine.c
@@ -67,60 +67,6 @@ queue_head_t action_queue; /* assign/shutdown queue */
decl_simple_lock_data(,action_lock);
/*
- * xxx_host_info:
- *
- * Return the host_info structure. This routine is exported to the
- * user level.
- */
-kern_return_t xxx_host_info(task, info)
- task_t task;
- machine_info_t info;
-{
-#ifdef lint
- task++;
-#endif /* lint */
- *info = machine_info;
- return(KERN_SUCCESS);
-}
-
-/*
- * xxx_slot_info:
- *
- * Return the slot_info structure for the specified slot. This routine
- * is exported to the user level.
- */
-kern_return_t xxx_slot_info(task, slot, info)
- task_t task;
- int slot;
- machine_slot_t info;
-{
-#ifdef lint
- task++;
-#endif /* lint */
- if ((slot < 0) || (slot >= NCPUS))
- return(KERN_INVALID_ARGUMENT);
- *info = machine_slot[slot];
- return(KERN_SUCCESS);
-}
-
-/*
- * xxx_cpu_control:
- *
- * Support for user control of cpus. The user indicates which cpu
- * he is interested in, and whether or not that cpu should be running.
- */
-kern_return_t xxx_cpu_control(task, cpu, runnable)
- task_t task;
- int cpu;
- boolean_t runnable;
-{
-#ifdef lint
- task++; cpu++; runnable++;
-#endif /* lint */
- return(KERN_FAILURE);
-}
-
-/*
* cpu_up:
*
* Flag specified cpu as up and running. Called when a processor comes
diff --git a/kern/macro_help.h b/kern/macro_help.h
index e13b01d9..a3d156b7 100644
--- a/kern/macro_help.h
+++ b/kern/macro_help.h
@@ -45,8 +45,8 @@ boolean_t ALWAYS;
#define ALWAYS TRUE
#endif /* lint */
-#define MACRO_BEGIN do {
-#define MACRO_END } while (NEVER)
+#define MACRO_BEGIN ({
+#define MACRO_END })
#define MACRO_RETURN if (ALWAYS) return
diff --git a/kern/pc_sample.c b/kern/pc_sample.c
index c82707b2..57002581 100644
--- a/kern/pc_sample.c
+++ b/kern/pc_sample.c
@@ -31,6 +31,7 @@
#include <mach/std_types.h> /* pointer_t */
#include <mach/pc_sample.h>
#include <machine/trap.h>
+#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <kern/pc_sample.h>
@@ -56,7 +57,7 @@ void take_pc_sample(
pc = interrupted_pc(t);
cp->seqno++;
sample = &((sampled_pc_t *)cp->buffer)[cp->seqno % MAX_PC_SAMPLES];
- sample->id = (natural_t)t;
+ sample->id = (vm_offset_t)t;
sample->pc = pc;
sample->sampletype = flavor;
}
diff --git a/kern/printf.c b/kern/printf.c
index 88a527ba..a3a771d0 100644
--- a/kern/printf.c
+++ b/kern/printf.c
@@ -39,7 +39,10 @@
* FILE *fd;
* char *format;
* {
+ * va_list listp;
+ * va_start(listp, fmt);
* _doprnt(format, &args, fd);
+ * va_end(listp);
* }
*
* would suffice. (This example does not handle the fprintf's "return
@@ -165,7 +168,7 @@ void printf_init(void)
void _doprnt(
register const char *fmt,
- va_list *argp,
+ va_list argp,
/* character output routine */
void (*putc)( char, vm_offset_t),
int radix, /* default radix - for '%r' */
@@ -248,7 +251,7 @@ void _doprnt(
}
}
else if (c == '*') {
- length = va_arg(*argp, int);
+ length = va_arg(argp, int);
c = *++fmt;
if (length < 0) {
ladjust = !ladjust;
@@ -266,7 +269,7 @@ void _doprnt(
}
}
else if (c == '*') {
- prec = va_arg(*argp, int);
+ prec = va_arg(argp, int);
c = *++fmt;
}
}
@@ -284,8 +287,8 @@ void _doprnt(
boolean_t any;
register int i;
- u = va_arg(*argp, unsigned long);
- p = va_arg(*argp, char *);
+ u = va_arg(argp, unsigned long);
+ p = va_arg(argp, char *);
base = *p++;
printnum(u, base, putc, putc_arg);
@@ -333,7 +336,7 @@ void _doprnt(
}
case 'c':
- c = va_arg(*argp, int);
+ c = va_arg(argp, int);
(*putc)(c, putc_arg);
break;
@@ -345,7 +348,7 @@ void _doprnt(
if (prec == -1)
prec = 0x7fffffff; /* MAXINT */
- p = va_arg(*argp, char *);
+ p = va_arg(argp, char *);
if (p == (char *)0)
p = "";
@@ -428,7 +431,7 @@ void _doprnt(
goto print_unsigned;
print_signed:
- n = va_arg(*argp, long);
+ n = va_arg(argp, long);
if (n >= 0) {
u = n;
sign_char = plus_sign;
@@ -440,7 +443,7 @@ void _doprnt(
goto print_num;
print_unsigned:
- u = va_arg(*argp, unsigned long);
+ u = va_arg(argp, unsigned long);
goto print_num;
print_num:
@@ -514,7 +517,7 @@ void _doprnt(
int vprintf(const char *fmt, va_list listp)
{
- _doprnt(fmt, &listp, (void (*)( char, vm_offset_t)) cnputc, 16, 0);
+ _doprnt(fmt, listp, (void (*)( char, vm_offset_t)) cnputc, 16, 0);
return 0;
}
@@ -550,7 +553,7 @@ void iprintf(const char *fmt, ...)
}
}
va_start(listp, fmt);
- _doprnt(fmt, &listp, (void (*)( char, vm_offset_t)) cnputc, 16, 0);
+ _doprnt(fmt, listp, (void (*)( char, vm_offset_t)) cnputc, 16, 0);
va_end(listp);
}
@@ -577,7 +580,7 @@ sprintf(char *buf, const char *fmt, ...)
char *start = buf;
va_start(listp, fmt);
- _doprnt(fmt, &listp, sputc, 16, (vm_offset_t)&buf);
+ _doprnt(fmt, listp, sputc, 16, (vm_offset_t)&buf);
va_end(listp);
*buf = 0;
@@ -601,12 +604,12 @@ snputc(char c, vm_offset_t arg)
}
int
-vsnprintf(char *buf, int size, const char *fmt, va_list args)
+vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
struct vsnprintf_cookie cookie
= { .buf = buf, .index = 0, .max_len = size };
- _doprnt (fmt, &args, snputc, 16, (vm_offset_t)&cookie);
+ _doprnt (fmt, args, snputc, 16, (vm_offset_t)&cookie);
cookie.buf[cookie.index] = '\0';
return cookie.index;
diff --git a/kern/printf.h b/kern/printf.h
index c5effe5d..8b4e7606 100644
--- a/kern/printf.h
+++ b/kern/printf.h
@@ -30,7 +30,7 @@
extern void printf_init (void);
extern void _doprnt (const char *fmt,
- va_list *argp,
+ va_list argp,
void (*putc)(char, vm_offset_t),
int radix,
vm_offset_t putc_arg);
@@ -40,6 +40,7 @@ extern void printnum (unsigned long u, int base,
vm_offset_t putc_arg);
extern int sprintf (char *buf, const char *fmt, ...);
+extern int vsnprintf (char *buf, size_t size, const char *fmt, va_list args);
extern int printf (const char *fmt, ...);
diff --git a/kern/priority.c b/kern/priority.c
index feddd8ee..17541b8b 100644
--- a/kern/priority.c
+++ b/kern/priority.c
@@ -39,7 +39,6 @@
#include <mach/machine.h>
#include <kern/host.h>
#include <kern/mach_clock.h>
-#include <kern/mach_param.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
diff --git a/kern/processor.c b/kern/processor.c
index 718ff3ad..19868609 100644
--- a/kern/processor.c
+++ b/kern/processor.c
@@ -35,6 +35,7 @@
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/debug.h>
+#include <kern/kalloc.h>
#include <kern/lock.h>
#include <kern/host.h>
#include <kern/ipc_tt.h>
@@ -46,8 +47,8 @@
#include <ipc/ipc_port.h>
#if MACH_HOST
-#include <kern/zalloc.h>
-zone_t pset_zone;
+#include <kern/slab.h>
+struct kmem_cache pset_cache;
#endif /* MACH_HOST */
@@ -112,10 +113,10 @@ void pset_sys_init(void)
register processor_t processor;
/*
- * Allocate the zone for processor sets.
+ * Allocate the cache for processor sets.
*/
- pset_zone = zinit(sizeof(struct processor_set), 0, 128*PAGE_SIZE,
- PAGE_SIZE, 0, "processor sets");
+ kmem_cache_init(&pset_cache, "processor_set",
+ sizeof(struct processor_set), 0, NULL, NULL, NULL, 0);
/*
* Give each processor a control port.
@@ -394,7 +395,7 @@ void pset_deallocate(
/*
* That's it, free data structure.
*/
- zfree(pset_zone, (vm_offset_t)pset);
+ kmem_cache_free(&pset_cache, (vm_offset_t)pset);
#endif /* MACH_HOST */
}
@@ -538,7 +539,7 @@ processor_set_create(
if (host == HOST_NULL)
return KERN_INVALID_ARGUMENT;
- pset = (processor_set_t) zalloc(pset_zone);
+ pset = (processor_set_t) kmem_cache_alloc(&pset_cache);
pset_init(pset);
pset_reference(pset); /* for new_set out argument */
pset_reference(pset); /* for new_name out argument */
diff --git a/kern/rbtree.c b/kern/rbtree.c
new file mode 100644
index 00000000..0f5eb9ae
--- /dev/null
+++ b/kern/rbtree.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2010, 2012 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kern/assert.h>
+#include <kern/rbtree.h>
+#include <kern/rbtree_i.h>
+#include <sys/types.h>
+
+#define unlikely(expr) __builtin_expect(!!(expr), 0)
+
+/*
+ * Return the index of a node in the children array of its parent.
+ *
+ * The parent parameter must not be null, and must be the parent of the
+ * given node.
+ */
+static inline int rbtree_index(const struct rbtree_node *node,
+ const struct rbtree_node *parent)
+{
+ assert(parent != NULL);
+ assert((node == NULL) || (rbtree_parent(node) == parent));
+
+ if (parent->children[RBTREE_LEFT] == node)
+ return RBTREE_LEFT;
+
+ assert(parent->children[RBTREE_RIGHT] == node);
+
+ return RBTREE_RIGHT;
+}
+
+/*
+ * Return the color of a node.
+ */
+static inline int rbtree_color(const struct rbtree_node *node)
+{
+ return node->parent & RBTREE_COLOR_MASK;
+}
+
+/*
+ * Return true if the node is red.
+ */
+static inline int rbtree_is_red(const struct rbtree_node *node)
+{
+ return rbtree_color(node) == RBTREE_COLOR_RED;
+}
+
+/*
+ * Return true if the node is black.
+ */
+static inline int rbtree_is_black(const struct rbtree_node *node)
+{
+ return rbtree_color(node) == RBTREE_COLOR_BLACK;
+}
+
+/*
+ * Set the parent of a node, retaining its current color.
+ */
+static inline void rbtree_set_parent(struct rbtree_node *node,
+ struct rbtree_node *parent)
+{
+ assert(rbtree_check_alignment(node));
+ assert(rbtree_check_alignment(parent));
+
+ node->parent = (unsigned long)parent | (node->parent & RBTREE_COLOR_MASK);
+}
+
+/*
+ * Set the color of a node, retaining its current parent.
+ */
+static inline void rbtree_set_color(struct rbtree_node *node, int color)
+{
+ assert((color & ~RBTREE_COLOR_MASK) == 0);
+ node->parent = (node->parent & RBTREE_PARENT_MASK) | color;
+}
+
+/*
+ * Set the color of a node to red, retaining its current parent.
+ */
+static inline void rbtree_set_red(struct rbtree_node *node)
+{
+ rbtree_set_color(node, RBTREE_COLOR_RED);
+}
+
+/*
+ * Set the color of a node to black, retaining its current parent.
+ */
+static inline void rbtree_set_black(struct rbtree_node *node)
+{
+ rbtree_set_color(node, RBTREE_COLOR_BLACK);
+}
+
+/*
+ * Perform a tree rotation, rooted at the given node.
+ *
+ * The direction parameter defines the rotation direction and is either
+ * RBTREE_LEFT or RBTREE_RIGHT.
+ */
+static void rbtree_rotate(struct rbtree *tree, struct rbtree_node *node,
+ int direction)
+{
+ struct rbtree_node *parent, *rnode;
+ int left, right;
+
+ left = direction;
+ right = 1 - left;
+ parent = rbtree_parent(node);
+ rnode = node->children[right];
+
+ node->children[right] = rnode->children[left];
+
+ if (rnode->children[left] != NULL)
+ rbtree_set_parent(rnode->children[left], node);
+
+ rnode->children[left] = node;
+ rbtree_set_parent(rnode, parent);
+
+ if (unlikely(parent == NULL))
+ tree->root = rnode;
+ else
+ parent->children[rbtree_index(node, parent)] = rnode;
+
+ rbtree_set_parent(node, rnode);
+}
+
+void rbtree_insert_rebalance(struct rbtree *tree, struct rbtree_node *parent,
+ int index, struct rbtree_node *node)
+{
+ struct rbtree_node *grand_parent, *uncle, *tmp;
+ int left, right;
+
+ assert(rbtree_check_alignment(parent));
+ assert(rbtree_check_alignment(node));
+
+ node->parent = (unsigned long)parent | RBTREE_COLOR_RED;
+ node->children[RBTREE_LEFT] = NULL;
+ node->children[RBTREE_RIGHT] = NULL;
+
+ if (unlikely(parent == NULL))
+ tree->root = node;
+ else
+ parent->children[index] = node;
+
+ for (;;) {
+ if (parent == NULL) {
+ rbtree_set_black(node);
+ break;
+ }
+
+ if (rbtree_is_black(parent))
+ break;
+
+ grand_parent = rbtree_parent(parent);
+ assert(grand_parent != NULL);
+
+ left = rbtree_index(parent, grand_parent);
+ right = 1 - left;
+
+ uncle = grand_parent->children[right];
+
+ /*
+ * Uncle is red. Flip colors and repeat at grand parent.
+ */
+ if ((uncle != NULL) && rbtree_is_red(uncle)) {
+ rbtree_set_black(uncle);
+ rbtree_set_black(parent);
+ rbtree_set_red(grand_parent);
+ node = grand_parent;
+ parent = rbtree_parent(node);
+ continue;
+ }
+
+ /*
+ * Node is the right child of its parent. Rotate left at parent.
+ */
+ if (parent->children[right] == node) {
+ rbtree_rotate(tree, parent, left);
+ tmp = node;
+ node = parent;
+ parent = tmp;
+ }
+
+ /*
+ * Node is the left child of its parent. Handle colors, rotate right
+ * at grand parent, and leave.
+ */
+ rbtree_set_black(parent);
+ rbtree_set_red(grand_parent);
+ rbtree_rotate(tree, grand_parent, right);
+ break;
+ }
+
+ assert(rbtree_is_black(tree->root));
+}
+
+void rbtree_remove(struct rbtree *tree, struct rbtree_node *node)
+{
+ struct rbtree_node *child, *parent, *brother;
+ int color, left, right;
+
+ if (node->children[RBTREE_LEFT] == NULL)
+ child = node->children[RBTREE_RIGHT];
+ else if (node->children[RBTREE_RIGHT] == NULL)
+ child = node->children[RBTREE_LEFT];
+ else {
+ struct rbtree_node *successor;
+
+ /*
+ * Two-children case: replace the node with its successor.
+ */
+
+ successor = node->children[RBTREE_RIGHT];
+
+ while (successor->children[RBTREE_LEFT] != NULL)
+ successor = successor->children[RBTREE_LEFT];
+
+ color = rbtree_color(successor);
+ child = successor->children[RBTREE_RIGHT];
+ parent = rbtree_parent(node);
+
+ if (unlikely(parent == NULL))
+ tree->root = successor;
+ else
+ parent->children[rbtree_index(node, parent)] = successor;
+
+ parent = rbtree_parent(successor);
+
+ /*
+ * Set parent directly to keep the original color.
+ */
+ successor->parent = node->parent;
+ successor->children[RBTREE_LEFT] = node->children[RBTREE_LEFT];
+ rbtree_set_parent(successor->children[RBTREE_LEFT], successor);
+
+ if (node == parent)
+ parent = successor;
+ else {
+ successor->children[RBTREE_RIGHT] = node->children[RBTREE_RIGHT];
+ rbtree_set_parent(successor->children[RBTREE_RIGHT], successor);
+ parent->children[RBTREE_LEFT] = child;
+
+ if (child != NULL)
+ rbtree_set_parent(child, parent);
+ }
+
+ goto update_color;
+ }
+
+ /*
+ * Node has at most one child.
+ */
+
+ color = rbtree_color(node);
+ parent = rbtree_parent(node);
+
+ if (child != NULL)
+ rbtree_set_parent(child, parent);
+
+ if (unlikely(parent == NULL))
+ tree->root = child;
+ else
+ parent->children[rbtree_index(node, parent)] = child;
+
+ /*
+ * The node has been removed, update the colors. The child pointer can
+ * be null, in which case it is considered a black leaf.
+ */
+update_color:
+ if (color == RBTREE_COLOR_RED)
+ return;
+
+ for (;;) {
+ if ((child != NULL) && rbtree_is_red(child)) {
+ rbtree_set_black(child);
+ break;
+ }
+
+ if (parent == NULL)
+ break;
+
+ left = rbtree_index(child, parent);
+ right = 1 - left;
+
+ brother = parent->children[right];
+
+ /*
+ * Brother is red. Recolor and rotate left at parent so that brother
+ * becomes black.
+ */
+ if (rbtree_is_red(brother)) {
+ rbtree_set_black(brother);
+ rbtree_set_red(parent);
+ rbtree_rotate(tree, parent, left);
+ brother = parent->children[right];
+ }
+
+ /*
+ * Brother has no red child. Recolor and repeat at parent.
+ */
+ if (((brother->children[RBTREE_LEFT] == NULL)
+ || rbtree_is_black(brother->children[RBTREE_LEFT]))
+ && ((brother->children[RBTREE_RIGHT] == NULL)
+ || rbtree_is_black(brother->children[RBTREE_RIGHT]))) {
+ rbtree_set_red(brother);
+ child = parent;
+ parent = rbtree_parent(child);
+ continue;
+ }
+
+ /*
+ * Brother's right child is black. Recolor and rotate right at brother.
+ */
+ if ((brother->children[right] == NULL)
+ || rbtree_is_black(brother->children[right])) {
+ rbtree_set_black(brother->children[left]);
+ rbtree_set_red(brother);
+ rbtree_rotate(tree, brother, right);
+ brother = parent->children[right];
+ }
+
+ /*
+ * Brother's left child is black. Exchange parent and brother colors
+ * (we already know brother is black), set brother's right child black,
+ * rotate left at parent and leave.
+ */
+ rbtree_set_color(brother, rbtree_color(parent));
+ rbtree_set_black(parent);
+ rbtree_set_black(brother->children[right]);
+ rbtree_rotate(tree, parent, left);
+ break;
+ }
+
+ assert((tree->root == NULL) || rbtree_is_black(tree->root));
+}
+
+struct rbtree_node * rbtree_nearest(struct rbtree_node *parent, int index,
+ int direction)
+{
+ assert(rbtree_check_index(direction));
+
+ if (parent == NULL)
+ return NULL;
+
+ assert(rbtree_check_index(index));
+
+ if (index != direction)
+ return parent;
+
+ return rbtree_walk(parent, direction);
+}
+
+struct rbtree_node * rbtree_firstlast(const struct rbtree *tree, int direction)
+{
+ struct rbtree_node *prev, *cur;
+
+ assert(rbtree_check_index(direction));
+
+ prev = NULL;
+
+ for (cur = tree->root; cur != NULL; cur = cur->children[direction])
+ prev = cur;
+
+ return prev;
+}
+
+struct rbtree_node * rbtree_walk(struct rbtree_node *node, int direction)
+{
+ int left, right;
+
+ assert(rbtree_check_index(direction));
+
+ left = direction;
+ right = 1 - left;
+
+ if (node == NULL)
+ return NULL;
+
+ if (node->children[left] != NULL) {
+ node = node->children[left];
+
+ while (node->children[right] != NULL)
+ node = node->children[right];
+ } else {
+ struct rbtree_node *parent;
+ int index;
+
+ for (;;) {
+ parent = rbtree_parent(node);
+
+ if (parent == NULL)
+ return NULL;
+
+ index = rbtree_index(node, parent);
+ node = parent;
+
+ if (index == right)
+ break;
+ }
+ }
+
+ return node;
+}
+
+/*
+ * Return the left-most deepest child node of the given node.
+ */
+static struct rbtree_node * rbtree_find_deepest(struct rbtree_node *node)
+{
+ struct rbtree_node *parent;
+
+ assert(node != NULL);
+
+ for (;;) {
+ parent = node;
+ node = node->children[RBTREE_LEFT];
+
+ if (node == NULL) {
+ node = parent->children[RBTREE_RIGHT];
+
+ if (node == NULL)
+ return parent;
+ }
+ }
+}
+
+struct rbtree_node * rbtree_postwalk_deepest(const struct rbtree *tree)
+{
+ struct rbtree_node *node;
+
+ node = tree->root;
+
+ if (node == NULL)
+ return NULL;
+
+ return rbtree_find_deepest(node);
+}
+
+struct rbtree_node * rbtree_postwalk_unlink(struct rbtree_node *node)
+{
+ struct rbtree_node *parent;
+ int index;
+
+ if (node == NULL)
+ return NULL;
+
+ assert(node->children[RBTREE_LEFT] == NULL);
+ assert(node->children[RBTREE_RIGHT] == NULL);
+
+ parent = rbtree_parent(node);
+
+ if (parent == NULL)
+ return NULL;
+
+ index = rbtree_index(node, parent);
+ parent->children[index] = NULL;
+ node = parent->children[RBTREE_RIGHT];
+
+ if (node == NULL)
+ return parent;
+
+ return rbtree_find_deepest(node);
+}
diff --git a/kern/rbtree.h b/kern/rbtree.h
new file mode 100644
index 00000000..5a65d1ef
--- /dev/null
+++ b/kern/rbtree.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Red-black tree.
+ */
+
+#ifndef _KERN_RBTREE_H
+#define _KERN_RBTREE_H
+
+#include <stddef.h>
+#include <kern/assert.h>
+#include <kern/macro_help.h>
+#include <kern/rbtree.h>
+#include <sys/types.h>
+
+#define structof(ptr, type, member) \
+ ((type *)((char *)ptr - offsetof(type, member)))
+
+/*
+ * Indexes of the left and right nodes in the children array of a node.
+ */
+#define RBTREE_LEFT 0
+#define RBTREE_RIGHT 1
+
+/*
+ * Red-black node.
+ */
+struct rbtree_node;
+
+/*
+ * Red-black tree.
+ */
+struct rbtree;
+
+/*
+ * Static tree initializer.
+ */
+#define RBTREE_INITIALIZER { NULL }
+
+#include "rbtree_i.h"
+
+/*
+ * Initialize a tree.
+ */
+static inline void rbtree_init(struct rbtree *tree)
+{
+ tree->root = NULL;
+}
+
+/*
+ * Initialize a node.
+ *
+ * A node is in no tree when its parent points to itself.
+ */
+static inline void rbtree_node_init(struct rbtree_node *node)
+{
+ assert(rbtree_check_alignment(node));
+
+ node->parent = (unsigned long)node | RBTREE_COLOR_RED;
+ node->children[RBTREE_LEFT] = NULL;
+ node->children[RBTREE_RIGHT] = NULL;
+}
+
+/*
+ * Return true if node is in no tree.
+ */
+static inline int rbtree_node_unlinked(const struct rbtree_node *node)
+{
+ return rbtree_parent(node) == node;
+}
+
+/*
+ * Macro that evaluates to the address of the structure containing the
+ * given node based on the given type and member.
+ */
+#define rbtree_entry(node, type, member) structof(node, type, member)
+
+/*
+ * Return true if tree is empty.
+ */
+static inline int rbtree_empty(const struct rbtree *tree)
+{
+ return tree->root == NULL;
+}
+
+/*
+ * Look up a node in a tree.
+ *
+ * Note that implementing the lookup algorithm as a macro gives two benefits:
+ * First, it avoids the overhead of a callback function. Next, the type of the
+ * cmp_fn parameter isn't rigid. The only guarantee offered by this
+ * implementation is that the key parameter is the first parameter given to
+ * cmp_fn. This way, users can pass only the value they need for comparison
+ * instead of e.g. allocating a full structure on the stack.
+ *
+ * See rbtree_insert().
+ */
+#define rbtree_lookup(tree, key, cmp_fn) \
+MACRO_BEGIN \
+ struct rbtree_node *___cur; \
+ int ___diff; \
+ \
+ ___cur = (tree)->root; \
+ \
+ while (___cur != NULL) { \
+ ___diff = cmp_fn(key, ___cur); \
+ \
+ if (___diff == 0) \
+ break; \
+ \
+ ___cur = ___cur->children[rbtree_d2i(___diff)]; \
+ } \
+ \
+ ___cur; \
+MACRO_END
+
+/*
+ * Look up a node or one of its nearest nodes in a tree.
+ *
+ * This macro essentially acts as rbtree_lookup() but if no entry matched
+ * the key, an additional step is performed to obtain the next or previous
+ * node, depending on the direction (left or right).
+ *
+ * The constraints that apply to the key parameter are the same as for
+ * rbtree_lookup().
+ */
+#define rbtree_lookup_nearest(tree, key, cmp_fn, dir) \
+MACRO_BEGIN \
+ struct rbtree_node *___cur, *___prev; \
+ int ___diff, ___index; \
+ \
+ ___prev = NULL; \
+ ___index = -1; \
+ ___cur = (tree)->root; \
+ \
+ while (___cur != NULL) { \
+ ___diff = cmp_fn(key, ___cur); \
+ \
+ if (___diff == 0) \
+ break; \
+ \
+ ___prev = ___cur; \
+ ___index = rbtree_d2i(___diff); \
+ ___cur = ___cur->children[___index]; \
+ } \
+ \
+ if (___cur == NULL) \
+ ___cur = rbtree_nearest(___prev, ___index, dir); \
+ \
+ ___cur; \
+MACRO_END
+
+/*
+ * Insert a node in a tree.
+ *
+ * This macro performs a standard lookup to obtain the insertion point of
+ * the given node in the tree (it is assumed that the inserted node never
+ * compares equal to any other entry in the tree) and links the node. It
+ * then It then checks red-black rules violations, and rebalances the tree
+ * if necessary.
+ *
+ * Unlike rbtree_lookup(), the cmp_fn parameter must compare two complete
+ * entries, so it is suggested to use two different comparison inline
+ * functions, such as myobj_cmp_lookup() and myobj_cmp_insert(). There is no
+ * guarantee about the order of the nodes given to the comparison function.
+ *
+ * See rbtree_lookup().
+ */
+#define rbtree_insert(tree, node, cmp_fn) \
+MACRO_BEGIN \
+ struct rbtree_node *___cur, *___prev; \
+ int ___diff, ___index; \
+ \
+ ___prev = NULL; \
+ ___index = -1; \
+ ___cur = (tree)->root; \
+ \
+ while (___cur != NULL) { \
+ ___diff = cmp_fn(node, ___cur); \
+ assert(___diff != 0); \
+ ___prev = ___cur; \
+ ___index = rbtree_d2i(___diff); \
+ ___cur = ___cur->children[___index]; \
+ } \
+ \
+ rbtree_insert_rebalance(tree, ___prev, ___index, node); \
+MACRO_END
+
+/*
+ * Look up a node/slot pair in a tree.
+ *
+ * This macro essentially acts as rbtree_lookup() but in addition to a node,
+ * it also returns a slot, which identifies an insertion point in the tree.
+ * If the returned node is null, the slot can be used by rbtree_insert_slot()
+ * to insert without the overhead of an additional lookup. The slot is a
+ * simple unsigned long integer.
+ *
+ * The constraints that apply to the key parameter are the same as for
+ * rbtree_lookup().
+ */
+#define rbtree_lookup_slot(tree, key, cmp_fn, slot) \
+MACRO_BEGIN \
+ struct rbtree_node *___cur, *___prev; \
+ int ___diff, ___index; \
+ \
+ ___prev = NULL; \
+ ___index = 0; \
+ ___cur = (tree)->root; \
+ \
+ while (___cur != NULL) { \
+ ___diff = cmp_fn(key, ___cur); \
+ \
+ if (___diff == 0) \
+ break; \
+ \
+ ___prev = ___cur; \
+ ___index = rbtree_d2i(___diff); \
+ ___cur = ___cur->children[___index]; \
+ } \
+ \
+ (slot) = rbtree_slot(___prev, ___index); \
+ ___cur; \
+MACRO_END
+
+/*
+ * Insert a node at an insertion point in a tree.
+ *
+ * This macro essentially acts as rbtree_insert() except that it doesn't
+ * obtain the insertion point with a standard lookup. The insertion point
+ * is obtained by calling rbtree_lookup_slot(). In addition, the new node
+ * must not compare equal to an existing node in the tree (i.e. the slot
+ * must denote a null node).
+ */
+static inline void
+rbtree_insert_slot(struct rbtree *tree, unsigned long slot,
+ struct rbtree_node *node)
+{
+ struct rbtree_node *parent;
+ int index;
+
+ parent = rbtree_slot_parent(slot);
+ index = rbtree_slot_index(slot);
+ rbtree_insert_rebalance(tree, parent, index, node);
+}
+
+/*
+ * Remove a node from a tree.
+ *
+ * After completion, the node is stale.
+ */
+void rbtree_remove(struct rbtree *tree, struct rbtree_node *node);
+
+/*
+ * Return the first node of a tree.
+ */
+#define rbtree_first(tree) rbtree_firstlast(tree, RBTREE_LEFT)
+
+/*
+ * Return the last node of a tree.
+ */
+#define rbtree_last(tree) rbtree_firstlast(tree, RBTREE_RIGHT)
+
+/*
+ * Return the node previous to the given node.
+ */
+#define rbtree_prev(node) rbtree_walk(node, RBTREE_LEFT)
+
+/*
+ * Return the node next to the given node.
+ */
+#define rbtree_next(node) rbtree_walk(node, RBTREE_RIGHT)
+
+/*
+ * Forge a loop to process all nodes of a tree, removing them when visited.
+ *
+ * This macro can only be used to destroy a tree, so that the resources used
+ * by the entries can be released by the user. It basically removes all nodes
+ * without doing any color checking.
+ *
+ * After completion, all nodes and the tree root member are stale.
+ */
+#define rbtree_for_each_remove(tree, node, tmp) \
+for (node = rbtree_postwalk_deepest(tree), \
+ tmp = rbtree_postwalk_unlink(node); \
+ node != NULL; \
+ node = tmp, tmp = rbtree_postwalk_unlink(node)) \
+
+#endif /* _KERN_RBTREE_H */
diff --git a/kern/rbtree_i.h b/kern/rbtree_i.h
new file mode 100644
index 00000000..69dfb9d0
--- /dev/null
+++ b/kern/rbtree_i.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _KERN_RBTREE_I_H
+#define _KERN_RBTREE_I_H
+
+#include <kern/assert.h>
+
+/*
+ * Red-black node structure.
+ *
+ * To reduce the number of branches and the instruction cache footprint,
+ * the left and right child pointers are stored in an array, and the symmetry
+ * of most tree operations is exploited by using left/right variables when
+ * referring to children.
+ *
+ * In addition, this implementation assumes that all nodes are 4-byte aligned,
+ * so that the least significant bit of the parent member can be used to store
+ * the color of the node. This is true for all modern 32 and 64 bits
+ * architectures, as long as the nodes aren't embedded in structures with
+ * special alignment constraints such as member packing.
+ */
+struct rbtree_node {
+ unsigned long parent;
+ struct rbtree_node *children[2];
+};
+
+/*
+ * Red-black tree structure.
+ */
+struct rbtree {
+ struct rbtree_node *root;
+};
+
+/*
+ * Masks applied on the parent member of a node to obtain either the
+ * color or the parent address.
+ */
+#define RBTREE_COLOR_MASK 0x1UL
+#define RBTREE_PARENT_MASK (~0x3UL)
+
+/*
+ * Node colors.
+ */
+#define RBTREE_COLOR_RED 0
+#define RBTREE_COLOR_BLACK 1
+
+/*
+ * Masks applied on slots to obtain either the child index or the parent
+ * address.
+ */
+#define RBTREE_SLOT_INDEX_MASK 0x1UL
+#define RBTREE_SLOT_PARENT_MASK (~RBTREE_SLOT_INDEX_MASK)
+
+/*
+ * Return true if the given pointer is suitably aligned.
+ */
+static inline int rbtree_check_alignment(const struct rbtree_node *node)
+{
+ return ((unsigned long)node & (~RBTREE_PARENT_MASK)) == 0;
+}
+
+/*
+ * Return true if the given index is a valid child index.
+ */
+static inline int rbtree_check_index(int index)
+{
+ return index == (index & 1);
+}
+
+/*
+ * Convert the result of a comparison into an index in the children array
+ * (0 or 1).
+ *
+ * This function is mostly used when looking up a node.
+ */
+static inline int rbtree_d2i(int diff)
+{
+ return !(diff <= 0);
+}
+
+/*
+ * Return the parent of a node.
+ */
+static inline struct rbtree_node * rbtree_parent(const struct rbtree_node *node)
+{
+ return (struct rbtree_node *)(node->parent & RBTREE_PARENT_MASK);
+}
+
+/*
+ * Translate an insertion point into a slot.
+ */
+static inline unsigned long rbtree_slot(struct rbtree_node *parent, int index)
+{
+ assert(rbtree_check_alignment(parent));
+ assert(rbtree_check_index(index));
+ return (unsigned long)parent | index;
+}
+
+/*
+ * Extract the parent address from a slot.
+ */
+static inline struct rbtree_node * rbtree_slot_parent(unsigned long slot)
+{
+ return (struct rbtree_node *)(slot & RBTREE_SLOT_PARENT_MASK);
+}
+
+/*
+ * Extract the index from a slot.
+ */
+static inline int rbtree_slot_index(unsigned long slot)
+{
+ return slot & RBTREE_SLOT_INDEX_MASK;
+}
+
+/*
+ * Insert a node in a tree, rebalancing it if necessary.
+ *
+ * The index parameter is the index in the children array of the parent where
+ * the new node is to be inserted. It is ignored if the parent is null.
+ *
+ * This function is intended to be used by the rbtree_insert() macro only.
+ */
+void rbtree_insert_rebalance(struct rbtree *tree, struct rbtree_node *parent,
+ int index, struct rbtree_node *node);
+
+/*
+ * Return the previous or next node relative to a location in a tree.
+ *
+ * The parent and index parameters define the location, which can be empty.
+ * The direction parameter is either RBTREE_LEFT (to obtain the previous
+ * node) or RBTREE_RIGHT (to obtain the next one).
+ */
+struct rbtree_node * rbtree_nearest(struct rbtree_node *parent, int index,
+ int direction);
+
+/*
+ * Return the first or last node of a tree.
+ *
+ * The direction parameter is either RBTREE_LEFT (to obtain the first node)
+ * or RBTREE_RIGHT (to obtain the last one).
+ */
+struct rbtree_node * rbtree_firstlast(const struct rbtree *tree, int direction);
+
+/*
+ * Return the node next to, or previous to the given node.
+ *
+ * The direction parameter is either RBTREE_LEFT (to obtain the previous node)
+ * or RBTREE_RIGHT (to obtain the next one).
+ */
+struct rbtree_node * rbtree_walk(struct rbtree_node *node, int direction);
+
+/*
+ * Return the left-most deepest node of a tree, which is the starting point of
+ * the postorder traversal performed by rbtree_for_each_remove().
+ */
+struct rbtree_node * rbtree_postwalk_deepest(const struct rbtree *tree);
+
+/*
+ * Unlink a node from its tree and return the next (right) node in postorder.
+ */
+struct rbtree_node * rbtree_postwalk_unlink(struct rbtree_node *node);
+
+#endif /* _KERN_RBTREE_I_H */
diff --git a/kern/sched_prim.c b/kern/sched_prim.c
index ff942aee..ba2dc8ab 100644
--- a/kern/sched_prim.c
+++ b/kern/sched_prim.c
@@ -139,14 +139,14 @@ void thread_check(thread_t, run_queue_t);
* The wait event hash table declarations are as follows:
*/
-#define NUMQUEUES 59
+#define NUMQUEUES 1031
queue_head_t wait_queue[NUMQUEUES];
decl_simple_lock_data(, wait_lock[NUMQUEUES])
/* NOTE: we want a small positive integer out of this */
#define wait_hash(event) \
- ((((int)(event) < 0) ? ~(int)(event) : (int)(event)) % NUMQUEUES)
+ ((((long)(event) < 0) ? ~(long)(event) : (long)(event)) % NUMQUEUES)
void wait_queue_init(void)
{
@@ -637,6 +637,7 @@ boolean_t thread_invoke(
thread_lock(new_thread);
new_thread->state &= ~TH_UNINT;
thread_unlock(new_thread);
+ thread_wakeup(&new_thread->state);
if (continuation != (void (*)()) 0) {
(void) spl0();
@@ -658,6 +659,7 @@ boolean_t thread_invoke(
new_thread->state &= ~(TH_SWAPPED | TH_UNINT);
thread_unlock(new_thread);
+ thread_wakeup(&new_thread->state);
#if NCPUS > 1
new_thread->last_processor = current_processor();
@@ -787,6 +789,7 @@ boolean_t thread_invoke(
new_thread->state &= ~(TH_SWAPPED | TH_UNINT);
thread_unlock(new_thread);
+ thread_wakeup(&new_thread->state);
/*
* Thread is now interruptible.
diff --git a/kern/server_loop.ch b/kern/server_loop.ch
index 1aa7edbc..409e013d 100644
--- a/kern/server_loop.ch
+++ b/kern/server_loop.ch
@@ -39,6 +39,7 @@
*/
#include <kern/debug.h>
+#include <kern/kalloc.h>
#include <mach/port.h>
#include <mach/message.h>
#include <vm/vm_kern.h> /* for kernel_map */
diff --git a/kern/slab.c b/kern/slab.c
new file mode 100644
index 00000000..99d5bca3
--- /dev/null
+++ b/kern/slab.c
@@ -0,0 +1,1615 @@
+/*
+ * Copyright (c) 2011 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Object caching and general purpose memory allocator.
+ *
+ * This allocator is based on the paper "The Slab Allocator: An Object-Caching
+ * Kernel Memory Allocator" by Jeff Bonwick.
+ *
+ * It allows the allocation of objects (i.e. fixed-size typed buffers) from
+ * caches and is efficient in both space and time. This implementation follows
+ * many of the indications from the paper mentioned. The most notable
+ * differences are outlined below.
+ *
+ * The per-cache self-scaling hash table for buffer-to-bufctl conversion,
+ * described in 3.2.3 "Slab Layout for Large Objects", has been replaced by
+ * a red-black tree storing slabs, sorted by address. The use of a
+ * self-balancing tree for buffer-to-slab conversions provides a few advantages
+ * over a hash table. Unlike a hash table, a BST provides a "lookup nearest"
+ * operation, so obtaining the slab data (whether it is embedded in the slab or
+ * off slab) from a buffer address simply consists of a "lookup nearest towards
+ * 0" tree search. Storing slabs instead of buffers also considerably reduces
+ * the number of elements to retain. Finally, a self-balancing tree is a true
+ * self-scaling data structure, whereas a hash table requires periodic
+ * maintenance and complete resizing, which is expensive. The only drawback is
+ * that releasing a buffer to the slab layer takes logarithmic time instead of
+ * constant time. But as the data set size is kept reasonable (because slabs
+ * are stored instead of buffers) and because the CPU pool layer services most
+ * requests, avoiding many accesses to the slab layer, it is considered an
+ * acceptable tradeoff.
+ *
+ * This implementation uses per-cpu pools of objects, which service most
+ * allocation requests. These pools act as caches (but are named differently
+ * to avoid confusion with CPU caches) that reduce contention on multiprocessor
+ * systems. When a pool is empty and cannot provide an object, it is filled by
+ * transferring multiple objects from the slab layer. The symmetric case is
+ * handled likewise.
+ */
+
+#include <string.h>
+#include <kern/assert.h>
+#include <kern/mach_clock.h>
+#include <kern/printf.h>
+#include <kern/slab.h>
+#include <kern/kalloc.h>
+#include <kern/cpu_number.h>
+#include <mach/vm_param.h>
+#include <mach/machine/vm_types.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_types.h>
+#include <sys/types.h>
+
+#ifdef MACH_DEBUG
+#include <mach_debug/slab_info.h>
+#endif
+
+/*
+ * Utility macros.
+ */
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define P2ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
+#define ISP2(x) P2ALIGNED(x, x)
+#define P2ALIGN(x, a) ((x) & -(a))
+#define P2ROUND(x, a) (-(-(x) & -(a)))
+#define P2END(x, a) (-(~(x) & -(a)))
+#define likely(expr) __builtin_expect(!!(expr), 1)
+#define unlikely(expr) __builtin_expect(!!(expr), 0)
+
+/*
+ * Minimum required alignment.
+ */
+#define KMEM_ALIGN_MIN 8
+
+/*
+ * Minimum number of buffers per slab.
+ *
+ * This value is ignored when the slab size exceeds a threshold.
+ */
+#define KMEM_MIN_BUFS_PER_SLAB 8
+
+/*
+ * Special slab size beyond which the minimum number of buffers per slab is
+ * ignored when computing the slab size of a cache.
+ */
+#define KMEM_SLAB_SIZE_THRESHOLD (8 * PAGE_SIZE)
+
+/*
+ * Special buffer size under which slab data is unconditionally allocated
+ * from its associated slab.
+ */
+#define KMEM_BUF_SIZE_THRESHOLD (PAGE_SIZE / 8)
+
+/*
+ * Time (in ticks) between two garbage collection operations.
+ */
+#define KMEM_GC_INTERVAL (5 * hz)
+
+/*
+ * The transfer size of a CPU pool is computed by dividing the pool size by
+ * this value.
+ */
+#define KMEM_CPU_POOL_TRANSFER_RATIO 2
+
+/*
+ * Redzone guard word.
+ */
+#ifdef __LP64__
+#if _HOST_BIG_ENDIAN
+#define KMEM_REDZONE_WORD 0xfeedfacefeedfaceUL
+#else /* _HOST_BIG_ENDIAN */
+#define KMEM_REDZONE_WORD 0xcefaedfecefaedfeUL
+#endif /* _HOST_BIG_ENDIAN */
+#else /* __LP64__ */
+#if _HOST_BIG_ENDIAN
+#define KMEM_REDZONE_WORD 0xfeedfaceUL
+#else /* _HOST_BIG_ENDIAN */
+#define KMEM_REDZONE_WORD 0xcefaedfeUL
+#endif /* _HOST_BIG_ENDIAN */
+#endif /* __LP64__ */
+
+/*
+ * Redzone byte for padding.
+ */
+#define KMEM_REDZONE_BYTE 0xbb
+
+/*
+ * Size of the VM submap from which default backend functions allocate.
+ */
+#define KMEM_MAP_SIZE (128 * 1024 * 1024)
+
+/*
+ * Shift for the first kalloc cache size.
+ */
+#define KALLOC_FIRST_SHIFT 5
+
+/*
+ * Number of caches backing general purpose allocations.
+ */
+#define KALLOC_NR_CACHES 13
+
+/*
+ * Values the buftag state member can take.
+ */
+#ifdef __LP64__
+#if _HOST_BIG_ENDIAN
+#define KMEM_BUFTAG_ALLOC 0xa110c8eda110c8edUL
+#define KMEM_BUFTAG_FREE 0xf4eeb10cf4eeb10cUL
+#else /* _HOST_BIG_ENDIAN */
+#define KMEM_BUFTAG_ALLOC 0xedc810a1edc810a1UL
+#define KMEM_BUFTAG_FREE 0x0cb1eef40cb1eef4UL
+#endif /* _HOST_BIG_ENDIAN */
+#else /* __LP64__ */
+#if _HOST_BIG_ENDIAN
+#define KMEM_BUFTAG_ALLOC 0xa110c8edUL
+#define KMEM_BUFTAG_FREE 0xf4eeb10cUL
+#else /* _HOST_BIG_ENDIAN */
+#define KMEM_BUFTAG_ALLOC 0xedc810a1UL
+#define KMEM_BUFTAG_FREE 0x0cb1eef4UL
+#endif /* _HOST_BIG_ENDIAN */
+#endif /* __LP64__ */
+
+/*
+ * Free and uninitialized patterns.
+ *
+ * These values are unconditionally 64-bit wide since buffers are at least
+ * 8-byte aligned.
+ */
+#if _HOST_BIG_ENDIAN
+#define KMEM_FREE_PATTERN 0xdeadbeefdeadbeefULL
+#define KMEM_UNINIT_PATTERN 0xbaddcafebaddcafeULL
+#else /* _HOST_BIG_ENDIAN */
+#define KMEM_FREE_PATTERN 0xefbeaddeefbeaddeULL
+#define KMEM_UNINIT_PATTERN 0xfecaddbafecaddbaULL
+#endif /* _HOST_BIG_ENDIAN */
+
+/*
+ * Cache flags.
+ *
+ * The flags don't change once set and can be tested without locking.
+ */
+#define KMEM_CF_NO_CPU_POOL 0x01 /* CPU pool layer disabled */
+#define KMEM_CF_SLAB_EXTERNAL 0x02 /* Slab data is off slab */
+#define KMEM_CF_NO_RECLAIM 0x04 /* Slabs are not reclaimable */
+#define KMEM_CF_VERIFY 0x08 /* Debugging facilities enabled */
+#define KMEM_CF_DIRECT 0x10 /* No buf-to-slab tree lookup */
+
+/*
+ * Options for kmem_cache_alloc_verify().
+ */
+#define KMEM_AV_NOCONSTRUCT 0
+#define KMEM_AV_CONSTRUCT 1
+
+/*
+ * Error codes for kmem_cache_error().
+ */
+#define KMEM_ERR_INVALID 0 /* Invalid address being freed */
+#define KMEM_ERR_DOUBLEFREE 1 /* Freeing already free address */
+#define KMEM_ERR_BUFTAG 2 /* Invalid buftag content */
+#define KMEM_ERR_MODIFIED 3 /* Buffer modified while free */
+#define KMEM_ERR_REDZONE 4 /* Redzone violation */
+
+#if SLAB_USE_CPU_POOLS
+/*
+ * Available CPU pool types.
+ *
+ * For each entry, the CPU pool size applies from the entry buf_size
+ * (excluded) up to (and including) the buf_size of the preceding entry.
+ *
+ * See struct kmem_cpu_pool_type for a description of the values.
+ */
+static struct kmem_cpu_pool_type kmem_cpu_pool_types[] = {
+ { 32768, 1, 0, NULL },
+ { 4096, 8, CPU_L1_SIZE, NULL },
+ { 256, 64, CPU_L1_SIZE, NULL },
+ { 0, 128, CPU_L1_SIZE, NULL }
+};
+
+/*
+ * Caches where CPU pool arrays are allocated from.
+ */
+static struct kmem_cache kmem_cpu_array_caches[ARRAY_SIZE(kmem_cpu_pool_types)];
+#endif /* SLAB_USE_CPU_POOLS */
+
+/*
+ * Cache for off slab data.
+ */
+static struct kmem_cache kmem_slab_cache;
+
+/*
+ * General purpose caches array.
+ */
+static struct kmem_cache kalloc_caches[KALLOC_NR_CACHES];
+
+/*
+ * List of all caches managed by the allocator.
+ */
+static struct list kmem_cache_list;
+static unsigned int kmem_nr_caches;
+static simple_lock_data_t __attribute__((used)) kmem_cache_list_lock;
+
+/*
+ * VM submap for slab caches.
+ */
+static struct vm_map kmem_map_store;
+vm_map_t kmem_map = &kmem_map_store;
+
+/*
+ * Time of the last memory reclaim, in clock ticks.
+ */
+static unsigned long kmem_gc_last_tick;
+
+#define kmem_error(format, ...) \
+ printf("mem: error: %s(): " format "\n", __func__, \
+ ## __VA_ARGS__)
+
+#define kmem_warn(format, ...) \
+ printf("mem: warning: %s(): " format "\n", __func__, \
+ ## __VA_ARGS__)
+
+#define kmem_print(format, ...) \
+ printf(format "\n", ## __VA_ARGS__)
+
+static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error,
+ void *arg);
+static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache);
+static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf);
+
+static void * kmem_buf_verify_bytes(void *buf, void *pattern, size_t size)
+{
+ char *ptr, *pattern_ptr, *end;
+
+ end = buf + size;
+
+ for (ptr = buf, pattern_ptr = pattern; ptr < end; ptr++, pattern_ptr++)
+ if (*ptr != *pattern_ptr)
+ return ptr;
+
+ return NULL;
+}
+
+static void * kmem_buf_verify(void *buf, uint64_t pattern, vm_size_t size)
+{
+ uint64_t *ptr, *end;
+
+ assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t)));
+ assert(P2ALIGNED(size, sizeof(uint64_t)));
+
+ end = buf + size;
+
+ for (ptr = buf; ptr < end; ptr++)
+ if (*ptr != pattern)
+ return kmem_buf_verify_bytes(ptr, &pattern, sizeof(pattern));
+
+ return NULL;
+}
+
+static void kmem_buf_fill(void *buf, uint64_t pattern, size_t size)
+{
+ uint64_t *ptr, *end;
+
+ assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t)));
+ assert(P2ALIGNED(size, sizeof(uint64_t)));
+
+ end = buf + size;
+
+ for (ptr = buf; ptr < end; ptr++)
+ *ptr = pattern;
+}
+
+static void * kmem_buf_verify_fill(void *buf, uint64_t old, uint64_t new,
+ size_t size)
+{
+ uint64_t *ptr, *end;
+
+ assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t)));
+ assert(P2ALIGNED(size, sizeof(uint64_t)));
+
+ end = buf + size;
+
+ for (ptr = buf; ptr < end; ptr++) {
+ if (*ptr != old)
+ return kmem_buf_verify_bytes(ptr, &old, sizeof(old));
+
+ *ptr = new;
+ }
+
+ return NULL;
+}
+
+static inline union kmem_bufctl *
+kmem_buf_to_bufctl(void *buf, struct kmem_cache *cache)
+{
+ return (union kmem_bufctl *)(buf + cache->bufctl_dist);
+}
+
+static inline struct kmem_buftag *
+kmem_buf_to_buftag(void *buf, struct kmem_cache *cache)
+{
+ return (struct kmem_buftag *)(buf + cache->buftag_dist);
+}
+
+static inline void * kmem_bufctl_to_buf(union kmem_bufctl *bufctl,
+ struct kmem_cache *cache)
+{
+ return (void *)bufctl - cache->bufctl_dist;
+}
+
+static vm_offset_t kmem_pagealloc(vm_size_t size)
+{
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ kr = kmem_alloc_wired(kmem_map, &addr, size);
+
+ if (kr != KERN_SUCCESS)
+ return 0;
+
+ return addr;
+}
+
+static void kmem_pagefree(vm_offset_t ptr, vm_size_t size)
+{
+ kmem_free(kmem_map, ptr, size);
+}
+
+static void kmem_slab_create_verify(struct kmem_slab *slab,
+ struct kmem_cache *cache)
+{
+ struct kmem_buftag *buftag;
+ size_t buf_size;
+ unsigned long buffers;
+ void *buf;
+
+ buf_size = cache->buf_size;
+ buf = slab->addr;
+ buftag = kmem_buf_to_buftag(buf, cache);
+
+ for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
+ kmem_buf_fill(buf, KMEM_FREE_PATTERN, cache->bufctl_dist);
+ buftag->state = KMEM_BUFTAG_FREE;
+ buf += buf_size;
+ buftag = kmem_buf_to_buftag(buf, cache);
+ }
+}
+
+/*
+ * Create an empty slab for a cache.
+ *
+ * The caller must drop all locks before calling this function.
+ */
+static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
+ size_t color)
+{
+ struct kmem_slab *slab;
+ union kmem_bufctl *bufctl;
+ size_t buf_size;
+ unsigned long buffers;
+ void *slab_buf;
+
+ if (cache->slab_alloc_fn == NULL)
+ slab_buf = (void *)kmem_pagealloc(cache->slab_size);
+ else
+ slab_buf = (void *)cache->slab_alloc_fn(cache->slab_size);
+
+ if (slab_buf == NULL)
+ return NULL;
+
+ if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
+ assert(!(cache->flags & KMEM_CF_NO_RECLAIM));
+ slab = (struct kmem_slab *)kmem_cache_alloc(&kmem_slab_cache);
+
+ if (slab == NULL) {
+ if (cache->slab_free_fn == NULL)
+ kmem_pagefree((vm_offset_t)slab_buf, cache->slab_size);
+ else
+ cache->slab_free_fn((vm_offset_t)slab_buf, cache->slab_size);
+
+ return NULL;
+ }
+ } else {
+ slab = (struct kmem_slab *)(slab_buf + cache->slab_size) - 1;
+ }
+
+ list_node_init(&slab->list_node);
+ rbtree_node_init(&slab->tree_node);
+ slab->nr_refs = 0;
+ slab->first_free = NULL;
+ slab->addr = slab_buf + color;
+
+ buf_size = cache->buf_size;
+ bufctl = kmem_buf_to_bufctl(slab->addr, cache);
+
+ for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
+ bufctl->next = slab->first_free;
+ slab->first_free = bufctl;
+ bufctl = (union kmem_bufctl *)((void *)bufctl + buf_size);
+ }
+
+ if (cache->flags & KMEM_CF_VERIFY)
+ kmem_slab_create_verify(slab, cache);
+
+ return slab;
+}
+
+static void kmem_slab_destroy_verify(struct kmem_slab *slab,
+ struct kmem_cache *cache)
+{
+ struct kmem_buftag *buftag;
+ size_t buf_size;
+ unsigned long buffers;
+ void *buf, *addr;
+
+ buf_size = cache->buf_size;
+ buf = slab->addr;
+ buftag = kmem_buf_to_buftag(buf, cache);
+
+ for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
+ if (buftag->state != KMEM_BUFTAG_FREE)
+ kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag);
+
+ addr = kmem_buf_verify(buf, KMEM_FREE_PATTERN, cache->bufctl_dist);
+
+ if (addr != NULL)
+ kmem_cache_error(cache, buf, KMEM_ERR_MODIFIED, addr);
+
+ buf += buf_size;
+ buftag = kmem_buf_to_buftag(buf, cache);
+ }
+}
+
+/*
+ * Destroy a slab.
+ *
+ * The caller must drop all locks before calling this function.
+ */
+static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
+{
+ vm_offset_t slab_buf;
+
+ assert(slab->nr_refs == 0);
+ assert(slab->first_free != NULL);
+ assert(!(cache->flags & KMEM_CF_NO_RECLAIM));
+
+ if (cache->flags & KMEM_CF_VERIFY)
+ kmem_slab_destroy_verify(slab, cache);
+
+ slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
+
+ if (cache->slab_free_fn == NULL)
+ kmem_pagefree(slab_buf, cache->slab_size);
+ else
+ cache->slab_free_fn(slab_buf, cache->slab_size);
+
+ if (cache->flags & KMEM_CF_SLAB_EXTERNAL)
+ kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab);
+}
+
+static inline int kmem_slab_use_tree(int flags)
+{
+ return !(flags & KMEM_CF_DIRECT) || (flags & KMEM_CF_VERIFY);
+}
+
+static inline int kmem_slab_cmp_lookup(const void *addr,
+ const struct rbtree_node *node)
+{
+ struct kmem_slab *slab;
+
+ slab = rbtree_entry(node, struct kmem_slab, tree_node);
+
+ if (addr == slab->addr)
+ return 0;
+ else if (addr < slab->addr)
+ return -1;
+ else
+ return 1;
+}
+
+static inline int kmem_slab_cmp_insert(const struct rbtree_node *a,
+ const struct rbtree_node *b)
+{
+ struct kmem_slab *slab;
+
+ slab = rbtree_entry(a, struct kmem_slab, tree_node);
+ return kmem_slab_cmp_lookup(slab->addr, b);
+}
+
+#if SLAB_USE_CPU_POOLS
+static void kmem_cpu_pool_init(struct kmem_cpu_pool *cpu_pool,
+ struct kmem_cache *cache)
+{
+ simple_lock_init(&cpu_pool->lock);
+ cpu_pool->flags = cache->flags;
+ cpu_pool->size = 0;
+ cpu_pool->transfer_size = 0;
+ cpu_pool->nr_objs = 0;
+ cpu_pool->array = NULL;
+}
+
+/*
+ * Return a CPU pool.
+ *
+ * This function will generally return the pool matching the CPU running the
+ * calling thread. Because of context switches and thread migration, the
+ * caller might be running on another processor after this function returns.
+ * Although not optimal, this should rarely happen, and it doesn't affect the
+ * allocator operations in any other way, as CPU pools are always valid, and
+ * their access is serialized by a lock.
+ */
+static inline struct kmem_cpu_pool * kmem_cpu_pool_get(struct kmem_cache *cache)
+{
+ return &cache->cpu_pools[cpu_number()];
+}
+
+static inline void kmem_cpu_pool_build(struct kmem_cpu_pool *cpu_pool,
+ struct kmem_cache *cache, void **array)
+{
+ cpu_pool->size = cache->cpu_pool_type->array_size;
+ cpu_pool->transfer_size = (cpu_pool->size
+ + KMEM_CPU_POOL_TRANSFER_RATIO - 1)
+ / KMEM_CPU_POOL_TRANSFER_RATIO;
+ cpu_pool->array = array;
+}
+
+static inline void * kmem_cpu_pool_pop(struct kmem_cpu_pool *cpu_pool)
+{
+ cpu_pool->nr_objs--;
+ return cpu_pool->array[cpu_pool->nr_objs];
+}
+
+static inline void kmem_cpu_pool_push(struct kmem_cpu_pool *cpu_pool, void *obj)
+{
+ cpu_pool->array[cpu_pool->nr_objs] = obj;
+ cpu_pool->nr_objs++;
+}
+
+static int kmem_cpu_pool_fill(struct kmem_cpu_pool *cpu_pool,
+ struct kmem_cache *cache)
+{
+ void *obj;
+ int i;
+
+ simple_lock(&cache->lock);
+
+ for (i = 0; i < cpu_pool->transfer_size; i++) {
+ obj = kmem_cache_alloc_from_slab(cache);
+
+ if (obj == NULL)
+ break;
+
+ kmem_cpu_pool_push(cpu_pool, obj);
+ }
+
+ simple_unlock(&cache->lock);
+
+ return i;
+}
+
+static void kmem_cpu_pool_drain(struct kmem_cpu_pool *cpu_pool,
+ struct kmem_cache *cache)
+{
+ void *obj;
+ int i;
+
+ simple_lock(&cache->lock);
+
+ for (i = cpu_pool->transfer_size; i > 0; i--) {
+ obj = kmem_cpu_pool_pop(cpu_pool);
+ kmem_cache_free_to_slab(cache, obj);
+ }
+
+ simple_unlock(&cache->lock);
+}
+#endif /* SLAB_USE_CPU_POOLS */
+
+static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error,
+ void *arg)
+{
+ struct kmem_buftag *buftag;
+
+ kmem_error("cache: %s, buffer: %p", cache->name, (void *)buf);
+
+ switch(error) {
+ case KMEM_ERR_INVALID:
+ kmem_error("freeing invalid address");
+ break;
+ case KMEM_ERR_DOUBLEFREE:
+ kmem_error("attempting to free the same address twice");
+ break;
+ case KMEM_ERR_BUFTAG:
+ buftag = arg;
+ kmem_error("invalid buftag content, buftag state: %p",
+ (void *)buftag->state);
+ break;
+ case KMEM_ERR_MODIFIED:
+ kmem_error("free buffer modified, fault address: %p, "
+ "offset in buffer: %td", arg, arg - buf);
+ break;
+ case KMEM_ERR_REDZONE:
+ kmem_error("write beyond end of buffer, fault address: %p, "
+ "offset in buffer: %td", arg, arg - buf);
+ break;
+ default:
+ kmem_error("unknown error");
+ }
+
+ /*
+ * Never reached.
+ */
+}
+
+/*
+ * Compute an appropriate slab size for the given cache.
+ *
+ * Once the slab size is known, this function sets the related properties
+ * (buffers per slab and maximum color). It can also set the KMEM_CF_DIRECT
+ * and/or KMEM_CF_SLAB_EXTERNAL flags depending on the resulting layout.
+ */
+static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
+{
+ size_t i, buffers, buf_size, slab_size, free_slab_size, optimal_size;
+ size_t waste, waste_min;
+ int embed, optimal_embed = optimal_embed;
+
+ buf_size = cache->buf_size;
+
+ if (buf_size < KMEM_BUF_SIZE_THRESHOLD)
+ flags |= KMEM_CACHE_NOOFFSLAB;
+
+ i = 0;
+ waste_min = (size_t)-1;
+
+ do {
+ i++;
+ slab_size = P2ROUND(i * buf_size, PAGE_SIZE);
+ free_slab_size = slab_size;
+
+ if (flags & KMEM_CACHE_NOOFFSLAB)
+ free_slab_size -= sizeof(struct kmem_slab);
+
+ buffers = free_slab_size / buf_size;
+ waste = free_slab_size % buf_size;
+
+ if (buffers > i)
+ i = buffers;
+
+ if (flags & KMEM_CACHE_NOOFFSLAB)
+ embed = 1;
+ else if (sizeof(struct kmem_slab) <= waste) {
+ embed = 1;
+ waste -= sizeof(struct kmem_slab);
+ } else {
+ embed = 0;
+ }
+
+ if (waste <= waste_min) {
+ waste_min = waste;
+ optimal_size = slab_size;
+ optimal_embed = embed;
+ }
+ } while ((buffers < KMEM_MIN_BUFS_PER_SLAB)
+ && (slab_size < KMEM_SLAB_SIZE_THRESHOLD));
+
+ assert(!(flags & KMEM_CACHE_NOOFFSLAB) || optimal_embed);
+
+ cache->slab_size = optimal_size;
+ slab_size = cache->slab_size - (optimal_embed
+ ? sizeof(struct kmem_slab)
+ : 0);
+ cache->bufs_per_slab = slab_size / buf_size;
+ cache->color_max = slab_size % buf_size;
+
+ if (cache->color_max >= PAGE_SIZE)
+ cache->color_max = PAGE_SIZE - 1;
+
+ if (optimal_embed) {
+ if (cache->slab_size == PAGE_SIZE)
+ cache->flags |= KMEM_CF_DIRECT;
+ } else {
+ cache->flags |= KMEM_CF_SLAB_EXTERNAL;
+ }
+}
+
+void kmem_cache_init(struct kmem_cache *cache, const char *name,
+ size_t obj_size, size_t align, kmem_cache_ctor_t ctor,
+ kmem_slab_alloc_fn_t slab_alloc_fn,
+ kmem_slab_free_fn_t slab_free_fn, int flags)
+{
+#if SLAB_USE_CPU_POOLS
+ struct kmem_cpu_pool_type *cpu_pool_type;
+ size_t i;
+#endif /* SLAB_USE_CPU_POOLS */
+ size_t buf_size;
+
+#if SLAB_VERIFY
+ cache->flags = KMEM_CF_VERIFY;
+#else /* SLAB_VERIFY */
+ cache->flags = 0;
+#endif /* SLAB_VERIFY */
+
+ if (flags & KMEM_CACHE_NOCPUPOOL)
+ cache->flags |= KMEM_CF_NO_CPU_POOL;
+
+ if (flags & KMEM_CACHE_NORECLAIM) {
+ assert(slab_free_fn == NULL);
+ flags |= KMEM_CACHE_NOOFFSLAB;
+ cache->flags |= KMEM_CF_NO_RECLAIM;
+ }
+
+ if (flags & KMEM_CACHE_VERIFY)
+ cache->flags |= KMEM_CF_VERIFY;
+
+ if (align < KMEM_ALIGN_MIN)
+ align = KMEM_ALIGN_MIN;
+
+ assert(obj_size > 0);
+ assert(ISP2(align));
+ assert(align < PAGE_SIZE);
+
+ buf_size = P2ROUND(obj_size, align);
+
+ simple_lock_init(&cache->lock);
+ list_node_init(&cache->node);
+ list_init(&cache->partial_slabs);
+ list_init(&cache->free_slabs);
+ rbtree_init(&cache->active_slabs);
+ cache->obj_size = obj_size;
+ cache->align = align;
+ cache->buf_size = buf_size;
+ cache->bufctl_dist = buf_size - sizeof(union kmem_bufctl);
+ cache->color = 0;
+ cache->nr_objs = 0;
+ cache->nr_bufs = 0;
+ cache->nr_slabs = 0;
+ cache->nr_free_slabs = 0;
+ cache->ctor = ctor;
+ cache->slab_alloc_fn = slab_alloc_fn;
+ cache->slab_free_fn = slab_free_fn;
+ strncpy(cache->name, name, sizeof(cache->name));
+ cache->name[sizeof(cache->name) - 1] = '\0';
+ cache->buftag_dist = 0;
+ cache->redzone_pad = 0;
+
+ if (cache->flags & KMEM_CF_VERIFY) {
+ cache->bufctl_dist = buf_size;
+ cache->buftag_dist = cache->bufctl_dist + sizeof(union kmem_bufctl);
+ cache->redzone_pad = cache->bufctl_dist - cache->obj_size;
+ buf_size += sizeof(union kmem_bufctl) + sizeof(struct kmem_buftag);
+ buf_size = P2ROUND(buf_size, align);
+ cache->buf_size = buf_size;
+ }
+
+ kmem_cache_compute_sizes(cache, flags);
+
+#if SLAB_USE_CPU_POOLS
+ for (cpu_pool_type = kmem_cpu_pool_types;
+ buf_size <= cpu_pool_type->buf_size;
+ cpu_pool_type++);
+
+ cache->cpu_pool_type = cpu_pool_type;
+
+ for (i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++)
+ kmem_cpu_pool_init(&cache->cpu_pools[i], cache);
+#endif /* SLAB_USE_CPU_POOLS */
+
+ simple_lock(&kmem_cache_list_lock);
+ list_insert_tail(&kmem_cache_list, &cache->node);
+ kmem_nr_caches++;
+ simple_unlock(&kmem_cache_list_lock);
+}
+
+static inline int kmem_cache_empty(struct kmem_cache *cache)
+{
+ return cache->nr_objs == cache->nr_bufs;
+}
+
+static int kmem_cache_grow(struct kmem_cache *cache)
+{
+ struct kmem_slab *slab;
+ size_t color;
+ int empty;
+
+ simple_lock(&cache->lock);
+
+ if (!kmem_cache_empty(cache)) {
+ simple_unlock(&cache->lock);
+ return 1;
+ }
+
+ color = cache->color;
+ cache->color += cache->align;
+
+ if (cache->color > cache->color_max)
+ cache->color = 0;
+
+ simple_unlock(&cache->lock);
+
+ slab = kmem_slab_create(cache, color);
+
+ simple_lock(&cache->lock);
+
+ if (slab != NULL) {
+ list_insert_tail(&cache->free_slabs, &slab->list_node);
+ cache->nr_bufs += cache->bufs_per_slab;
+ cache->nr_slabs++;
+ cache->nr_free_slabs++;
+ }
+
+ /*
+ * Even if our slab creation failed, another thread might have succeeded
+ * in growing the cache.
+ */
+ empty = kmem_cache_empty(cache);
+
+ simple_unlock(&cache->lock);
+
+ return !empty;
+}
+
+static void kmem_cache_reap(struct kmem_cache *cache)
+{
+ struct kmem_slab *slab;
+ struct list dead_slabs;
+
+ if (cache->flags & KMEM_CF_NO_RECLAIM)
+ return;
+
+ list_init(&dead_slabs);
+
+ simple_lock(&cache->lock);
+
+ while (!list_empty(&cache->free_slabs)) {
+ slab = list_first_entry(&cache->free_slabs, struct kmem_slab,
+ list_node);
+ list_remove(&slab->list_node);
+ list_insert(&dead_slabs, &slab->list_node);
+ cache->nr_bufs -= cache->bufs_per_slab;
+ cache->nr_slabs--;
+ cache->nr_free_slabs--;
+ }
+
+ simple_unlock(&cache->lock);
+
+ while (!list_empty(&dead_slabs)) {
+ slab = list_first_entry(&dead_slabs, struct kmem_slab, list_node);
+ list_remove(&slab->list_node);
+ kmem_slab_destroy(slab, cache);
+ }
+}
+
+/*
+ * Allocate a raw (unconstructed) buffer from the slab layer of a cache.
+ *
+ * The cache must be locked before calling this function.
+ */
+static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache)
+{
+ struct kmem_slab *slab;
+ union kmem_bufctl *bufctl;
+
+ if (!list_empty(&cache->partial_slabs))
+ slab = list_first_entry(&cache->partial_slabs, struct kmem_slab,
+ list_node);
+ else if (!list_empty(&cache->free_slabs))
+ slab = list_first_entry(&cache->free_slabs, struct kmem_slab,
+ list_node);
+ else
+ return NULL;
+
+ bufctl = slab->first_free;
+ assert(bufctl != NULL);
+ slab->first_free = bufctl->next;
+ slab->nr_refs++;
+ cache->nr_objs++;
+
+ /*
+ * The slab has become complete.
+ */
+ if (slab->nr_refs == cache->bufs_per_slab) {
+ list_remove(&slab->list_node);
+
+ if (slab->nr_refs == 1)
+ cache->nr_free_slabs--;
+ } else if (slab->nr_refs == 1) {
+ /*
+ * The slab has become partial.
+ */
+ list_remove(&slab->list_node);
+ list_insert_tail(&cache->partial_slabs, &slab->list_node);
+ cache->nr_free_slabs--;
+ } else if (!list_singular(&cache->partial_slabs)) {
+ struct list *node;
+ struct kmem_slab *tmp;
+
+ /*
+ * The slab remains partial. If there is more than one partial slab,
+ * maintain the list sorted.
+ */
+
+ assert(slab->nr_refs > 1);
+
+ for (node = list_prev(&slab->list_node);
+ !list_end(&cache->partial_slabs, node);
+ node = list_prev(node)) {
+ tmp = list_entry(node, struct kmem_slab, list_node);
+
+ if (tmp->nr_refs >= slab->nr_refs)
+ break;
+ }
+
+ /*
+ * If the direct neighbor was found, the list is already sorted.
+ * If no slab was found, the slab is inserted at the head of the list.
+ */
+ if (node != list_prev(&slab->list_node)) {
+ list_remove(&slab->list_node);
+ list_insert_after(node, &slab->list_node);
+ }
+ }
+
+ if ((slab->nr_refs == 1) && kmem_slab_use_tree(cache->flags))
+ rbtree_insert(&cache->active_slabs, &slab->tree_node,
+ kmem_slab_cmp_insert);
+
+ return kmem_bufctl_to_buf(bufctl, cache);
+}
+
+/*
+ * Release a buffer to the slab layer of a cache.
+ *
+ * The cache must be locked before calling this function.
+ */
+static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
+{
+ struct kmem_slab *slab;
+ union kmem_bufctl *bufctl;
+
+ if (cache->flags & KMEM_CF_DIRECT) {
+ assert(cache->slab_size == PAGE_SIZE);
+ slab = (struct kmem_slab *)P2END((unsigned long)buf, cache->slab_size)
+ - 1;
+ } else {
+ struct rbtree_node *node;
+
+ node = rbtree_lookup_nearest(&cache->active_slabs, buf,
+ kmem_slab_cmp_lookup, RBTREE_LEFT);
+ assert(node != NULL);
+ slab = rbtree_entry(node, struct kmem_slab, tree_node);
+ assert((unsigned long)buf < (P2ALIGN((unsigned long)slab->addr
+ + cache->slab_size, PAGE_SIZE)));
+ }
+
+ assert(slab->nr_refs >= 1);
+ assert(slab->nr_refs <= cache->bufs_per_slab);
+ bufctl = kmem_buf_to_bufctl(buf, cache);
+ bufctl->next = slab->first_free;
+ slab->first_free = bufctl;
+ slab->nr_refs--;
+ cache->nr_objs--;
+
+ /*
+ * The slab has become free.
+ */
+ if (slab->nr_refs == 0) {
+ if (kmem_slab_use_tree(cache->flags))
+ rbtree_remove(&cache->active_slabs, &slab->tree_node);
+
+ /*
+ * The slab was partial.
+ */
+ if (cache->bufs_per_slab > 1)
+ list_remove(&slab->list_node);
+
+ list_insert_tail(&cache->free_slabs, &slab->list_node);
+ cache->nr_free_slabs++;
+ } else if (slab->nr_refs == (cache->bufs_per_slab - 1)) {
+ /*
+ * The slab has become partial.
+ */
+ list_insert(&cache->partial_slabs, &slab->list_node);
+ } else if (!list_singular(&cache->partial_slabs)) {
+ struct list *node;
+ struct kmem_slab *tmp;
+
+ /*
+ * The slab remains partial. If there is more than one partial slab,
+ * maintain the list sorted.
+ */
+
+ assert(slab->nr_refs > 0);
+
+ for (node = list_next(&slab->list_node);
+ !list_end(&cache->partial_slabs, node);
+ node = list_next(node)) {
+ tmp = list_entry(node, struct kmem_slab, list_node);
+
+ if (tmp->nr_refs <= slab->nr_refs)
+ break;
+ }
+
+ /*
+ * If the direct neighbor was found, the list is already sorted.
+ * If no slab was found, the slab is inserted at the tail of the list.
+ */
+ if (node != list_next(&slab->list_node)) {
+ list_remove(&slab->list_node);
+ list_insert_before(node, &slab->list_node);
+ }
+ }
+}
+
/*
 * Debugging checks run when allocating a buffer from a KMEM_CF_VERIFY cache.
 *
 * The buffer must currently be marked free and still carry the free fill
 * pattern. On success the redzone is armed, the buftag is flipped to the
 * allocated state, and the constructor is run if requested via construct
 * (KMEM_AV_CONSTRUCT vs KMEM_AV_NOCONSTRUCT at the call sites).
 */
static void kmem_cache_alloc_verify(struct kmem_cache *cache, void *buf,
                                    int construct)
{
    struct kmem_buftag *buftag;
    union kmem_bufctl *bufctl;
    void *addr;

    buftag = kmem_buf_to_buftag(buf, cache);

    /* A buffer handed out twice, or with a corrupted tag, is fatal. */
    if (buftag->state != KMEM_BUFTAG_FREE)
        kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag);

    /* Check the free pattern and replace it with the uninitialized
       pattern; a non-NULL return pinpoints the first modified word. */
    addr = kmem_buf_verify_fill(buf, KMEM_FREE_PATTERN, KMEM_UNINIT_PATTERN,
                                cache->bufctl_dist);

    if (addr != NULL)
        kmem_cache_error(cache, buf, KMEM_ERR_MODIFIED, addr);

    /* Arm the byte-granular redzone between the object end and the
       word-aligned redzone word stored in the bufctl. */
    addr = buf + cache->obj_size;
    memset(addr, KMEM_REDZONE_BYTE, cache->redzone_pad);

    bufctl = kmem_buf_to_bufctl(buf, cache);
    bufctl->redzone = KMEM_REDZONE_WORD;
    buftag->state = KMEM_BUFTAG_ALLOC;

    if (construct && (cache->ctor != NULL))
        cache->ctor(buf);
}
+
/*
 * Allocate an object from a cache.
 *
 * Fast path: pop a pre-constructed object from the local CPU pool
 * (when SLAB_USE_CPU_POOLS is enabled). Slow path: take a buffer from
 * a slab, growing the cache when it is exhausted. Returns 0 when the
 * cache cannot grow. Locking order is cpu_pool -> cache.
 */
vm_offset_t kmem_cache_alloc(struct kmem_cache *cache)
{
    int filled;
    void *buf;

#if SLAB_USE_CPU_POOLS
    struct kmem_cpu_pool *cpu_pool;

    cpu_pool = kmem_cpu_pool_get(cache);

    /* flags is a read-only CPU-local copy, safe to test without the lock */
    if (cpu_pool->flags & KMEM_CF_NO_CPU_POOL)
        goto slab_alloc;

    simple_lock(&cpu_pool->lock);

fast_alloc:
    if (likely(cpu_pool->nr_objs > 0)) {
        buf = kmem_cpu_pool_pop(cpu_pool);
        simple_unlock(&cpu_pool->lock);

        /* Objects in the pool sit in the free state; verify and
           construct them on the way out. */
        if (cpu_pool->flags & KMEM_CF_VERIFY)
            kmem_cache_alloc_verify(cache, buf, KMEM_AV_CONSTRUCT);

        return (vm_offset_t)buf;
    }

    if (cpu_pool->array != NULL) {
        filled = kmem_cpu_pool_fill(cpu_pool, cache);

        if (!filled) {
            /* The slab layer is empty too: drop the pool lock while
               growing the cache, then retry the fast path. */
            simple_unlock(&cpu_pool->lock);

            filled = kmem_cache_grow(cache);

            if (!filled)
                return 0;

            simple_lock(&cpu_pool->lock);
        }

        goto fast_alloc;
    }

    simple_unlock(&cpu_pool->lock);
#endif /* SLAB_USE_CPU_POOLS */

slab_alloc:
    simple_lock(&cache->lock);
    buf = kmem_cache_alloc_from_slab(cache);
    simple_unlock(&cache->lock);

    if (buf == NULL) {
        filled = kmem_cache_grow(cache);

        if (!filled)
            return 0;

        goto slab_alloc;
    }

    /* Buffers straight off a slab are verified without construction;
       the constructor is applied explicitly below. */
    if (cache->flags & KMEM_CF_VERIFY)
        kmem_cache_alloc_verify(cache, buf, KMEM_AV_NOCONSTRUCT);

    if (cache->ctor != NULL)
        cache->ctor(buf);

    return (vm_offset_t)buf;
}
+
/*
 * Debugging checks run when freeing a buffer to a KMEM_CF_VERIFY cache.
 *
 * Validates that buf really belongs to an active slab of this cache, is
 * properly aligned on a buffer boundary, is currently allocated (catching
 * double frees), and that neither the byte redzone nor the redzone word
 * was overwritten. On success the buffer is refilled with the free
 * pattern and its buftag marked free. All failures call kmem_cache_error.
 */
static void kmem_cache_free_verify(struct kmem_cache *cache, void *buf)
{
    struct rbtree_node *node;
    struct kmem_buftag *buftag;
    struct kmem_slab *slab;
    union kmem_bufctl *bufctl;
    unsigned char *redzone_byte;
    unsigned long slabend;

    /* Find the rightmost slab starting at or below buf. */
    simple_lock(&cache->lock);
    node = rbtree_lookup_nearest(&cache->active_slabs, buf,
                                 kmem_slab_cmp_lookup, RBTREE_LEFT);
    simple_unlock(&cache->lock);

    if (node == NULL)
        kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);

    slab = rbtree_entry(node, struct kmem_slab, tree_node);
    slabend = P2ALIGN((unsigned long)slab->addr + cache->slab_size, PAGE_SIZE);

    /* buf must fall inside the candidate slab... */
    if ((unsigned long)buf >= slabend)
        kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);

    /* ...and on an exact buffer boundary. */
    if ((((unsigned long)buf - (unsigned long)slab->addr) % cache->buf_size)
        != 0)
        kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);

    /*
     * As the buffer address is valid, accessing its buftag is safe.
     */
    buftag = kmem_buf_to_buftag(buf, cache);

    if (buftag->state != KMEM_BUFTAG_ALLOC) {
        if (buftag->state == KMEM_BUFTAG_FREE)
            kmem_cache_error(cache, buf, KMEM_ERR_DOUBLEFREE, NULL);
        else
            kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag);
    }

    /* Scan the byte redzone between object end and the bufctl. */
    redzone_byte = buf + cache->obj_size;
    bufctl = kmem_buf_to_bufctl(buf, cache);

    while (redzone_byte < (unsigned char *)bufctl) {
        if (*redzone_byte != KMEM_REDZONE_BYTE)
            kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte);

        redzone_byte++;
    }

    /* Check the word-sized redzone, reporting the first bad byte. */
    if (bufctl->redzone != KMEM_REDZONE_WORD) {
        unsigned long word;

        word = KMEM_REDZONE_WORD;
        redzone_byte = kmem_buf_verify_bytes(&bufctl->redzone, &word,
                                             sizeof(bufctl->redzone));
        kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte);
    }

    kmem_buf_fill(buf, KMEM_FREE_PATTERN, cache->bufctl_dist);
    buftag->state = KMEM_BUFTAG_FREE;
}
+
/*
 * Release an object to its cache.
 *
 * Fast path: push the object onto the local CPU pool, draining the pool
 * to the slab layer when full and building the pool array lazily.
 * Slow path (or when CPU pools are disabled/unavailable): return the
 * buffer directly to its slab.
 *
 * NOTE: the opening `if (... & KMEM_CF_VERIFY) {` is deliberately split
 * across the #if/#else so one brace-balanced statement serves both
 * configurations; do not "clean up" the brace structure.
 */
void kmem_cache_free(struct kmem_cache *cache, vm_offset_t obj)
{
#if SLAB_USE_CPU_POOLS
    struct kmem_cpu_pool *cpu_pool;
    void **array;

    cpu_pool = kmem_cpu_pool_get(cache);

    if (cpu_pool->flags & KMEM_CF_VERIFY) {
#else /* SLAB_USE_CPU_POOLS */
    if (cache->flags & KMEM_CF_VERIFY) {
#endif /* SLAB_USE_CPU_POOLS */
        kmem_cache_free_verify(cache, (void *)obj);
    }

#if SLAB_USE_CPU_POOLS
    if (cpu_pool->flags & KMEM_CF_NO_CPU_POOL)
        goto slab_free;

    simple_lock(&cpu_pool->lock);

fast_free:
    if (likely(cpu_pool->nr_objs < cpu_pool->size)) {
        kmem_cpu_pool_push(cpu_pool, (void *)obj);
        simple_unlock(&cpu_pool->lock);
        return;
    }

    /* Pool full: move a batch back to the slab layer, then retry. */
    if (cpu_pool->array != NULL) {
        kmem_cpu_pool_drain(cpu_pool, cache);
        goto fast_free;
    }

    simple_unlock(&cpu_pool->lock);

    /* No array yet: allocate one with the pool lock dropped. */
    array = (void *)kmem_cache_alloc(cache->cpu_pool_type->array_cache);

    if (array != NULL) {
        simple_lock(&cpu_pool->lock);

        /*
         * Another thread may have built the CPU pool while the lock was
         * dropped.
         */
        if (cpu_pool->array != NULL) {
            simple_unlock(&cpu_pool->lock);
            kmem_cache_free(cache->cpu_pool_type->array_cache,
                            (vm_offset_t)array);
            goto fast_free;
        }

        kmem_cpu_pool_build(cpu_pool, cache, array);
        goto fast_free;
    }

slab_free:
#endif /* SLAB_USE_CPU_POOLS */

    kmem_cache_free_to_slab(cache, (void *)obj);
}
+
+void slab_collect(void)
+{
+ struct kmem_cache *cache;
+
+ if (elapsed_ticks <= (kmem_gc_last_tick + KMEM_GC_INTERVAL))
+ return;
+
+ kmem_gc_last_tick = elapsed_ticks;
+
+ simple_lock(&kmem_cache_list_lock);
+
+ list_for_each_entry(&kmem_cache_list, cache, node)
+ kmem_cache_reap(cache);
+
+ simple_unlock(&kmem_cache_list_lock);
+}
+
+void slab_bootstrap(void)
+{
+ /* Make sure a bufctl can always be stored in a buffer */
+ assert(sizeof(union kmem_bufctl) <= KMEM_ALIGN_MIN);
+
+ list_init(&kmem_cache_list);
+ simple_lock_init(&kmem_cache_list_lock);
+}
+
/*
 * Main initialization of the slab allocator: create the kmem submap and
 * the internal caches (CPU pool array caches when enabled, and the cache
 * backing struct kmem_slab itself).
 */
void slab_init(void)
{
    vm_offset_t min, max;

#if SLAB_USE_CPU_POOLS
    struct kmem_cpu_pool_type *cpu_pool_type;
    char name[KMEM_CACHE_NAME_SIZE];
    size_t i, size;
#endif /* SLAB_USE_CPU_POOLS */

    /* Carve the slab submap out of the kernel map. */
    kmem_submap(kmem_map, kernel_map, &min, &max, KMEM_MAP_SIZE, FALSE);

#if SLAB_USE_CPU_POOLS
    /* One cache per pool type, sized for that type's object array. */
    for (i = 0; i < ARRAY_SIZE(kmem_cpu_pool_types); i++) {
        cpu_pool_type = &kmem_cpu_pool_types[i];
        cpu_pool_type->array_cache = &kmem_cpu_array_caches[i];
        sprintf(name, "kmem_cpu_array_%d", cpu_pool_type->array_size);
        size = sizeof(void *) * cpu_pool_type->array_size;
        kmem_cache_init(cpu_pool_type->array_cache, name, size,
                        cpu_pool_type->array_align, NULL, NULL, NULL, 0);
    }
#endif /* SLAB_USE_CPU_POOLS */

    /*
     * Prevent off slab data for the slab cache to avoid infinite recursion.
     */
    kmem_cache_init(&kmem_slab_cache, "kmem_slab", sizeof(struct kmem_slab),
                    0, NULL, NULL, NULL, KMEM_CACHE_NOOFFSLAB);
}
+
+static vm_offset_t kalloc_pagealloc(vm_size_t size)
+{
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ kr = kmem_alloc_wired(kmem_map, &addr, size);
+
+ if (kr != KERN_SUCCESS)
+ return 0;
+
+ return addr;
+}
+
/* Page-level backend for kfree: return memory to the slab submap. */
static void kalloc_pagefree(vm_offset_t ptr, vm_size_t size)
{
    kmem_free(kmem_map, ptr, size);
}
+
+void kalloc_init(void)
+{
+ char name[KMEM_CACHE_NAME_SIZE];
+ size_t i, size;
+ vm_offset_t min, max;
+
+ size = 1 << KALLOC_FIRST_SHIFT;
+
+ for (i = 0; i < ARRAY_SIZE(kalloc_caches); i++) {
+ sprintf(name, "kalloc_%lu", size);
+ kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL,
+ kalloc_pagealloc, kalloc_pagefree, 0);
+ size <<= 1;
+ }
+}
+
+/*
+ * Return the kalloc cache index matching the given allocation size, which
+ * must be strictly greater than 0.
+ */
+static inline size_t kalloc_get_index(unsigned long size)
+{
+ assert(size != 0);
+
+ size = (size - 1) >> KALLOC_FIRST_SHIFT;
+
+ if (size == 0)
+ return 0;
+ else
+ return (sizeof(long) * 8) - __builtin_clzl(size);
+}
+
+static void kalloc_verify(struct kmem_cache *cache, void *buf, size_t size)
+{
+ size_t redzone_size;
+ void *redzone;
+
+ assert(size <= cache->obj_size);
+
+ redzone = buf + size;
+ redzone_size = cache->obj_size - size;
+ memset(redzone, KMEM_REDZONE_BYTE, redzone_size);
+}
+
+vm_offset_t kalloc(vm_size_t size)
+{
+ size_t index;
+ void *buf;
+
+ if (size == 0)
+ return 0;
+
+ index = kalloc_get_index(size);
+
+ if (index < ARRAY_SIZE(kalloc_caches)) {
+ struct kmem_cache *cache;
+
+ cache = &kalloc_caches[index];
+ buf = (void *)kmem_cache_alloc(cache);
+
+ if ((buf != 0) && (cache->flags & KMEM_CF_VERIFY))
+ kalloc_verify(cache, buf, size);
+ } else
+ buf = (void *)kalloc_pagealloc(size);
+
+ return (vm_offset_t)buf;
+}
+
+static void kfree_verify(struct kmem_cache *cache, void *buf, size_t size)
+{
+ unsigned char *redzone_byte, *redzone_end;
+
+ assert(size <= cache->obj_size);
+
+ redzone_byte = buf + size;
+ redzone_end = buf + cache->obj_size;
+
+ while (redzone_byte < redzone_end) {
+ if (*redzone_byte != KMEM_REDZONE_BYTE)
+ kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte);
+
+ redzone_byte++;
+ }
+}
+
+void kfree(vm_offset_t data, vm_size_t size)
+{
+ size_t index;
+
+ if ((data == 0) || (size == 0))
+ return;
+
+ index = kalloc_get_index(size);
+
+ if (index < ARRAY_SIZE(kalloc_caches)) {
+ struct kmem_cache *cache;
+
+ cache = &kalloc_caches[index];
+
+ if (cache->flags & KMEM_CF_VERIFY)
+ kfree_verify(cache, (void *)data, size);
+
+ kmem_cache_free(cache, data);
+ } else {
+ kalloc_pagefree(data, size);
+ }
+}
+
/*
 * Print a per-cache usage summary on the kernel console.
 *
 * Lock order: the global cache list lock is held across the walk, and
 * each cache lock is taken while its counters are read. Sizes are shown
 * in KiB (>> 10).
 */
void slab_info(void)
{
    struct kmem_cache *cache;
    vm_size_t mem_usage, mem_reclaimable;

    printf("cache                  obj slab  bufs   objs   bufs "
           "   total reclaimable\n"
           "name                  size size /slab  usage  count "
           "  memory      memory\n");

    simple_lock(&kmem_cache_list_lock);

    list_for_each_entry(&kmem_cache_list, cache, node) {
        simple_lock(&cache->lock);

        mem_usage = (cache->nr_slabs * cache->slab_size) >> 10;
        mem_reclaimable = (cache->nr_free_slabs * cache->slab_size) >> 10;

        printf("%-19s %6lu %3luk  %4lu %6lu %6lu %7luk %10luk\n",
               cache->name, cache->obj_size, cache->slab_size >> 10,
               cache->bufs_per_slab, cache->nr_objs, cache->nr_bufs,
               mem_usage, mem_reclaimable);

        simple_unlock(&cache->lock);
    }

    simple_unlock(&kmem_cache_list_lock);
}
+
#if MACH_DEBUG
/*
 * MACH_DEBUG RPC: return a snapshot of every kernel cache's properties
 * and counters in *infop / *infoCntp. If the caller's buffer is too
 * small, a pageable kernel buffer is allocated and handed back as a
 * VM copy object.
 */
kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
                             unsigned int *infoCntp)
{
    struct kmem_cache *cache;
    cache_info_t *info;
    unsigned int i, nr_caches;
    vm_size_t info_size = 0;    /* set (and used) only on the kernel-buffer path */
    kern_return_t kr;

    if (host == HOST_NULL)
        return KERN_INVALID_HOST;

    /*
     * Assume the cache list is unaltered once the kernel is ready.
     */

    simple_lock(&kmem_cache_list_lock);
    nr_caches = kmem_nr_caches;
    simple_unlock(&kmem_cache_list_lock);

    if (nr_caches <= *infoCntp)
        info = *infop;
    else {
        vm_offset_t info_addr;

        info_size = round_page(nr_caches * sizeof(*info));
        kr = kmem_alloc_pageable(ipc_kernel_map, &info_addr, info_size);

        if (kr != KERN_SUCCESS)
            return kr;

        info = (cache_info_t *)info_addr;
    }

    if (info == NULL)
        return KERN_RESOURCE_SHORTAGE;

    i = 0;

    list_for_each_entry(&kmem_cache_list, cache, node) {
        /*
         * Lock the cache being inspected; the previous code locked a
         * global cache_lock here while unlocking cache->lock below,
         * which both named the wrong lock and left it mismatched.
         */
        simple_lock(&cache->lock);
        info[i].flags = ((cache->flags & KMEM_CF_NO_CPU_POOL)
                         ? CACHE_FLAGS_NO_CPU_POOL : 0)
                        | ((cache->flags & KMEM_CF_SLAB_EXTERNAL)
                           ? CACHE_FLAGS_SLAB_EXTERNAL : 0)
                        | ((cache->flags & KMEM_CF_NO_RECLAIM)
                           ? CACHE_FLAGS_NO_RECLAIM : 0)
                        | ((cache->flags & KMEM_CF_VERIFY)
                           ? CACHE_FLAGS_VERIFY : 0)
                        | ((cache->flags & KMEM_CF_DIRECT)
                           ? CACHE_FLAGS_DIRECT : 0);
#if SLAB_USE_CPU_POOLS
        info[i].cpu_pool_size = cache->cpu_pool_type->array_size;
#else /* SLAB_USE_CPU_POOLS */
        info[i].cpu_pool_size = 0;
#endif /* SLAB_USE_CPU_POOLS */
        info[i].obj_size = cache->obj_size;
        info[i].align = cache->align;
        info[i].buf_size = cache->buf_size;
        info[i].slab_size = cache->slab_size;
        info[i].bufs_per_slab = cache->bufs_per_slab;
        info[i].nr_objs = cache->nr_objs;
        info[i].nr_bufs = cache->nr_bufs;
        info[i].nr_slabs = cache->nr_slabs;
        info[i].nr_free_slabs = cache->nr_free_slabs;
        strncpy(info[i].name, cache->name, sizeof(info[i].name));
        info[i].name[sizeof(info[i].name) - 1] = '\0';
        simple_unlock(&cache->lock);

        i++;
    }

    if (info != *infop) {
        vm_map_copy_t copy;
        vm_size_t used;

        used = nr_caches * sizeof(*info);

        /* Zero the allocated tail so no kernel data leaks to the caller. */
        if (used != info_size)
            memset((char *)info + used, 0, info_size - used);

        kr = vm_map_copyin(ipc_kernel_map, (vm_offset_t)info, used, TRUE,
                           &copy);

        assert(kr == KERN_SUCCESS);
        *infop = (cache_info_t *)copy;
    }

    *infoCntp = nr_caches;

    return KERN_SUCCESS;
}
#endif /* MACH_DEBUG */
diff --git a/kern/slab.h b/kern/slab.h
new file mode 100644
index 00000000..47bef218
--- /dev/null
+++ b/kern/slab.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2011 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Object caching memory allocator.
+ */
+
+#ifndef _KERN_SLAB_H
+#define _KERN_SLAB_H
+
+#include <kern/lock.h>
+#include <kern/list.h>
+#include <kern/rbtree.h>
+#include <mach/machine/vm_types.h>
+#include <sys/types.h>
+#include <vm/vm_types.h>
+
+#if SLAB_USE_CPU_POOLS
+/*
+ * L1 cache line size.
+ */
+#define CPU_L1_SIZE (1 << CPU_L1_SHIFT)
+
+/*
+ * Per-processor cache of pre-constructed objects.
+ *
+ * The flags member is a read-only CPU-local copy of the parent cache flags.
+ */
struct kmem_cpu_pool {
    simple_lock_data_t lock;
    int flags;          /* Read-only copy of the parent cache flags */
    int size;           /* Capacity of array (max cached objects) */
    int transfer_size;  /* presumably objects moved per fill/drain — confirm */
    int nr_objs;        /* Current number of cached objects */
    void **array;       /* Object stack; NULL until lazily built */
} __attribute__((aligned(CPU_L1_SIZE)));
+
+/*
+ * When a cache is created, its CPU pool type is determined from the buffer
+ * size. For small buffer sizes, many objects can be cached in a CPU pool.
+ * Conversely, for large buffer sizes, this would incur much overhead, so only
+ * a few objects are stored in a CPU pool.
+ */
struct kmem_cpu_pool_type {
    size_t buf_size;     /* presumably max buffer size served by this type — confirm */
    int array_size;      /* Number of object slots in a pool array */
    size_t array_align;  /* Alignment of the array cache's buffers */
    struct kmem_cache *array_cache;  /* Cache backing the pool arrays */
};
+#endif /* SLAB_USE_CPU_POOLS */
+
+/*
+ * Buffer descriptor.
+ *
+ * For normal caches (i.e. without SLAB_CF_VERIFY), bufctls are located at the
+ * end of (but inside) each buffer. If SLAB_CF_VERIFY is set, bufctls are
+ * located after each buffer.
+ *
+ * When an object is allocated to a client, its bufctl isn't used. This memory
+ * is instead used for redzoning if cache debugging is in effect.
+ */
union kmem_bufctl {
    union kmem_bufctl *next;  /* Link in the slab's free-buffer list */
    unsigned long redzone;    /* Redzone word while the buffer is allocated */
};
+
+/*
+ * Buffer tag.
+ *
+ * This structure is only used for SLAB_CF_VERIFY caches. It is located after
+ * the bufctl and includes information about the state of the buffer it
+ * describes (allocated or not). It should be thought of as a debugging
+ * extension of the bufctl.
+ */
struct kmem_buftag {
    unsigned long state;  /* KMEM_BUFTAG_ALLOC or KMEM_BUFTAG_FREE */
};
+
+/*
+ * Page-aligned collection of unconstructed buffers.
+ */
struct kmem_slab {
    struct list list_node;        /* Linkage in free or partial slab list */
    struct rbtree_node tree_node; /* Linkage in the cache's active slab tree */
    unsigned long nr_refs;        /* Allocated buffers (0..bufs_per_slab) */
    union kmem_bufctl *first_free; /* Head of the free-buffer list */
    void *addr;                   /* Address of the slab's first buffer */
};
+
+/*
+ * Type for constructor functions.
+ *
+ * The pre-constructed state of an object is supposed to include only
+ * elements such as e.g. linked lists, locks, reference counters. Therefore
+ * constructors are expected to 1) never fail and 2) not need any
+ * user-provided data. The first constraint implies that object construction
+ * never performs dynamic resource allocation, which also means there is no
+ * need for destructors.
+ */
+typedef void (*kmem_cache_ctor_t)(void *obj);
+
+/*
+ * Types for slab allocation/free functions.
+ *
+ * All addresses and sizes must be page-aligned.
+ */
+typedef vm_offset_t (*kmem_slab_alloc_fn_t)(vm_size_t);
+typedef void (*kmem_slab_free_fn_t)(vm_offset_t, vm_size_t);
+
+/*
+ * Cache name buffer size.
+ */
+#define KMEM_CACHE_NAME_SIZE 32
+
+/*
+ * Cache of objects.
+ *
+ * Locking order : cpu_pool -> cache. CPU pools locking is ordered by CPU ID.
+ *
+ * The partial slabs list is sorted by slab references. Slabs with a high
+ * number of references are placed first on the list to reduce fragmentation.
+ * Sorting occurs at insertion/removal of buffers in a slab. As the list
+ * is maintained sorted, and the number of references only changes by one,
+ * this is a very cheap operation in the average case and the worst (linear)
+ * case is very unlikely.
+ */
struct kmem_cache {
#if SLAB_USE_CPU_POOLS
    /* CPU pool layer */
    struct kmem_cpu_pool cpu_pools[NCPUS];
    struct kmem_cpu_pool_type *cpu_pool_type;
#endif /* SLAB_USE_CPU_POOLS */

    /* Slab layer */
    simple_lock_data_t lock;
    struct list node;           /* Cache list linkage */
    struct list partial_slabs;  /* Sorted by descending nr_refs */
    struct list free_slabs;     /* Fully free slabs, reapable */
    struct rbtree active_slabs; /* Slabs with at least one allocated buffer */
    int flags;                  /* KMEM_CF_* internal flags */
    size_t obj_size;    /* User-provided size */
    size_t align;       /* Buffer alignment */
    size_t buf_size;    /* Aligned object size */
    size_t bufctl_dist; /* Distance from buffer to bufctl */
    size_t slab_size;   /* Size of one slab, page-aligned */
    size_t color;       /* presumably next slab color offset — confirm */
    size_t color_max;   /* presumably max color offset — confirm */
    unsigned long bufs_per_slab;
    unsigned long nr_objs;  /* Number of allocated objects */
    unsigned long nr_bufs;  /* Total number of buffers */
    unsigned long nr_slabs;      /* Total number of slabs */
    unsigned long nr_free_slabs; /* Slabs on free_slabs */
    kmem_cache_ctor_t ctor;          /* Optional object constructor */
    kmem_slab_alloc_fn_t slab_alloc_fn; /* Optional custom slab backend */
    kmem_slab_free_fn_t slab_free_fn;
    char name[KMEM_CACHE_NAME_SIZE]; /* For reporting (slab_info) */
    size_t buftag_dist; /* Distance from buffer to buftag */
    size_t redzone_pad; /* Bytes from end of object to redzone word */
};
+
+/*
+ * Mach-style declarations for struct kmem_cache.
+ */
+typedef struct kmem_cache *kmem_cache_t;
+#define KMEM_CACHE_NULL ((kmem_cache_t) 0)
+
+/*
+ * VM submap for slab allocations.
+ */
+extern vm_map_t kmem_map;
+
+/*
+ * Cache initialization flags.
+ */
+#define KMEM_CACHE_NOCPUPOOL 0x1 /* Don't use the per-cpu pools */
+#define KMEM_CACHE_NOOFFSLAB 0x2 /* Don't allocate external slab data */
+#define KMEM_CACHE_NORECLAIM 0x4 /* Never give slabs back to their source,
+ implies KMEM_CACHE_NOOFFSLAB */
+#define KMEM_CACHE_VERIFY 0x8 /* Use debugging facilities */
+
+/*
+ * Initialize a cache.
+ */
+void kmem_cache_init(struct kmem_cache *cache, const char *name,
+ size_t obj_size, size_t align, kmem_cache_ctor_t ctor,
+ kmem_slab_alloc_fn_t slab_alloc_fn,
+ kmem_slab_free_fn_t slab_free_fn, int flags);
+
+/*
+ * Allocate an object from a cache.
+ */
+vm_offset_t kmem_cache_alloc(struct kmem_cache *cache);
+
+/*
+ * Release an object to its cache.
+ */
+void kmem_cache_free(struct kmem_cache *cache, vm_offset_t obj);
+
+/*
+ * Initialize the memory allocator module.
+ */
+void slab_bootstrap(void);
+void slab_init(void);
+
+/*
+ * Release free slabs to the VM system.
+ */
+void slab_collect(void);
+
+/*
+ * Display a summary of all kernel caches.
+ */
+void slab_info(void);
+
+#endif /* _KERN_SLAB_H */
diff --git a/kern/startup.c b/kern/startup.c
index 75a05b0e..0c04ccf8 100644
--- a/kern/startup.c
+++ b/kern/startup.c
@@ -48,7 +48,6 @@
#include <kern/timer.h>
#include <kern/xpr.h>
#include <kern/time_stamp.h>
-#include <kern/zalloc.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
@@ -146,10 +145,10 @@ void setup_main()
timestamp_init();
- mapable_time_init();
-
machine_init();
+ mapable_time_init();
+
machine_info.max_cpus = NCPUS;
machine_info.memory_size = phys_last_addr - phys_first_addr; /* XXX mem_size */
machine_info.avail_cpus = 0;
diff --git a/kern/strings.c b/kern/strings.c
index 80e410e2..3676f98e 100644
--- a/kern/strings.c
+++ b/kern/strings.c
@@ -172,3 +172,22 @@ strlen(
return string - 1 - ret;
}
+
/*
 * Abstract:
 *	memset writes value "c" in the "n" bytes starting at address "s".
 *	The return value is a pointer to the "s" string.
 */

void *
memset(
	void *_s, int c, size_t n)
{
	char *s = _s;
	size_t i;	/* size_t, not int: avoids signed/unsigned comparison
			   with n and works for n > INT_MAX */

	for (i = 0; i < n ; i++)
		s[i] = c;

	return _s;
}
diff --git a/kern/syscall_emulation.c b/kern/syscall_emulation.c
index 06781d5b..c1c3096c 100644
--- a/kern/syscall_emulation.c
+++ b/kern/syscall_emulation.c
@@ -336,24 +336,6 @@ task_set_emulation_vector(task, vector_start, emulation_vector,
}
/*
- * Compatibility entry. Vector is passed inline.
- */
-kern_return_t
-xxx_task_set_emulation_vector(task, vector_start, emulation_vector,
- emulation_vector_count)
- task_t task;
- int vector_start;
- emulation_vector_t emulation_vector;
- unsigned int emulation_vector_count;
-{
- return task_set_emulation_vector_internal(
- task,
- vector_start,
- emulation_vector,
- emulation_vector_count);
-}
-
-/*
* task_get_emulation_vector: [Server Entry]
*
* Get the list of emulated system calls for this task.
@@ -460,53 +442,6 @@ task_get_emulation_vector(task, vector_start, emulation_vector,
}
/*
- * xxx_task_get_emulation: [Server Entry]
- * get the list of emulated system calls for this task.
- * Compatibility code: return list in-line.
- */
-kern_return_t
-xxx_task_get_emulation_vector(task, vector_start, emulation_vector,
- emulation_vector_count)
- task_t task;
- int *vector_start;
- emulation_vector_t emulation_vector; /* pointer to OUT array */
- unsigned int *emulation_vector_count; /*IN/OUT*/
-{
- register eml_dispatch_t eml;
-
- if (task == TASK_NULL)
- return( EML_BAD_TASK );
-
- task_lock(task);
-
- eml = task->eml_dispatch;
- if (eml == EML_DISPATCH_NULL) {
- task_unlock(task);
- *vector_start = 0;
- *emulation_vector_count = 0;
- return( KERN_SUCCESS );
- }
-
- simple_lock(&eml->lock);
-
- if (*emulation_vector_count < eml->disp_count) {
- simple_unlock(&eml->lock);
- task_unlock(task);
- return( EML_BAD_CNT );
- }
-
- *vector_start = eml->disp_min;
- *emulation_vector_count = eml->disp_count;
- memcpy(emulation_vector, eml->disp_vector,
- *emulation_vector_count * sizeof(vm_offset_t));
- simple_unlock(&eml->lock);
-
- task_unlock(task);
-
- return( KERN_SUCCESS );
-}
-
-/*
* task_set_emulation: [Server Entry]
* set up for user space emulation of syscalls within this task.
*/
diff --git a/kern/syscall_subr.c b/kern/syscall_subr.c
index 395b9b8f..ae2d7d73 100644
--- a/kern/syscall_subr.c
+++ b/kern/syscall_subr.c
@@ -34,6 +34,7 @@
#include <kern/counters.h>
#include <kern/ipc_kobject.h>
#include <kern/mach_clock.h>
+#include <kern/printf.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
@@ -380,3 +381,18 @@ register thread_t thread;
(void) splx(s);
return(KERN_SUCCESS);
}
+
+/*
+ * mach_print
+ *
+ * Display a null-terminated character string on the Mach console.
+ * This system call is meant as a debugging tool useful to circumvent
+ * messaging altogether.
+ */
#ifdef MACH_KDB
/* Trap entry: print s on the kernel console (debugging aid). */
void
mach_print(const char *s)
{
	/* Pass through "%s" so s is never interpreted as a format string. */
	printf("%s", s);
}
#endif /* MACH_KDB */
diff --git a/kern/syscall_subr.h b/kern/syscall_subr.h
index 2b8bcd36..a2e39205 100644
--- a/kern/syscall_subr.h
+++ b/kern/syscall_subr.h
@@ -36,5 +36,6 @@ extern int swtch_pri(int);
extern int thread_switch(mach_port_t, int, mach_msg_timeout_t);
extern void thread_depress_timeout(thread_t);
extern kern_return_t thread_depress_abort(thread_t);
+extern void mach_print(const char *);
#endif /* _KERN_SYSCALL_SUBR_H_ */
diff --git a/kern/syscall_sw.c b/kern/syscall_sw.c
index b2e20e66..607d843e 100644
--- a/kern/syscall_sw.c
+++ b/kern/syscall_sw.c
@@ -122,8 +122,12 @@ mach_trap_t mach_trap_table[] = {
MACH_TRAP(mach_thread_self, 0), /* 27 */
MACH_TRAP(mach_task_self, 0), /* 28 */
MACH_TRAP(mach_host_self, 0), /* 29 */
+#ifdef MACH_KDB
+ MACH_TRAP_STACK(mach_print, 1), /* 30 */
+#else /* MACH_KDB */
+ MACH_TRAP_STACK(kern_invalid, 0), /* 30 */
+#endif /* MACH_KDB */
- MACH_TRAP(kern_invalid, 0), /* 30 */
MACH_TRAP(kern_invalid, 0), /* 31 */
MACH_TRAP(kern_invalid, 0), /* 32 */
MACH_TRAP(kern_invalid, 0), /* 33 emul: task_by_pid */
diff --git a/kern/task.c b/kern/task.c
index 88da16e8..114dd319 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -40,10 +40,9 @@
#include <ipc/ipc_space.h>
#include <ipc/ipc_types.h>
#include <kern/debug.h>
-#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h> /* for thread_wakeup */
@@ -52,7 +51,7 @@
#include <machine/machspl.h> /* for splsched */
task_t kernel_task = TASK_NULL;
-zone_t task_zone;
+struct kmem_cache task_cache;
extern void eml_init(void);
extern void eml_task_reference(task_t, task_t);
@@ -60,11 +59,8 @@ extern void eml_task_deallocate(task_t);
void task_init(void)
{
- task_zone = zinit(
- sizeof(struct task), 0,
- TASK_MAX * sizeof(struct task),
- TASK_CHUNK * sizeof(struct task),
- 0, "tasks");
+ kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
+ NULL, NULL, NULL, 0);
eml_init();
machine_task_module_init ();
@@ -77,38 +73,6 @@ void task_init(void)
(void) task_create(TASK_NULL, FALSE, &kernel_task);
}
-/*
- * Create a task running in the kernel address space. It may
- * have its own map of size mem_size (if 0, it uses the kernel map),
- * and may have ipc privileges.
- */
-task_t kernel_task_create(
- task_t parent_task,
- vm_size_t map_size)
-{
- task_t new_task;
- vm_offset_t min, max;
-
- /*
- * Create the task.
- */
- (void) task_create(parent_task, FALSE, &new_task);
-
- /*
- * Task_create creates the task with a user-space map.
- * Remove the map and replace it with the kernel map
- * or a submap of the kernel map.
- */
- vm_map_deallocate(new_task->map);
- if (map_size == 0)
- new_task->map = kernel_map;
- else
- new_task->map = kmem_suballoc(kernel_map, &min, &max,
- map_size, FALSE);
-
- return new_task;
-}
-
kern_return_t task_create(
task_t parent_task,
boolean_t inherit_memory,
@@ -120,7 +84,7 @@ kern_return_t task_create(
int i;
#endif
- new_task = (task_t) zalloc(task_zone);
+ new_task = (task_t) kmem_cache_alloc(&task_cache);
if (new_task == TASK_NULL) {
panic("task_create: no memory for task structure");
}
@@ -144,6 +108,13 @@ kern_return_t task_create(
new_task->active = TRUE;
new_task->user_stop_count = 0;
new_task->thread_count = 0;
+ new_task->faults = 0;
+ new_task->zero_fills = 0;
+ new_task->reactivations = 0;
+ new_task->pageins = 0;
+ new_task->cow_faults = 0;
+ new_task->messages_sent = 0;
+ new_task->messages_received = 0;
eml_task_reference(new_task, parent_task);
@@ -235,7 +206,7 @@ void task_deallocate(
pset_deallocate(pset);
vm_map_deallocate(task->map);
is_release(task->itk_space);
- zfree(task_zone, (vm_offset_t) task);
+ kmem_cache_free(&task_cache, (vm_offset_t) task);
}
void task_reference(
@@ -783,6 +754,30 @@ kern_return_t task_info(
break;
}
+ case TASK_EVENTS_INFO:
+ {
+ register task_events_info_t event_info;
+
+ if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ event_info = (task_events_info_t) task_info_out;
+
+ task_lock(&task);
+ event_info->faults = task->faults;
+ event_info->zero_fills = task->zero_fills;
+ event_info->reactivations = task->reactivations;
+ event_info->pageins = task->pageins;
+ event_info->cow_faults = task->cow_faults;
+ event_info->messages_sent = task->messages_sent;
+ event_info->messages_received = task->messages_received;
+ task_unlock(&task);
+
+ *task_info_count = TASK_EVENTS_INFO_COUNT;
+ break;
+ }
+
case TASK_THREAD_TIMES_INFO:
{
register task_thread_times_info_t times_info;
diff --git a/kern/task.h b/kern/task.h
index 9d902435..9bfea571 100644
--- a/kern/task.h
+++ b/kern/task.h
@@ -102,6 +102,15 @@ struct task {
/* Hardware specific data. */
machine_task_t machine;
+
+ /* Statistics */
+ natural_t faults; /* page faults counter */
+ natural_t zero_fills; /* zero fill pages counter */
+ natural_t reactivations; /* reactivated pages counter */
+ natural_t pageins; /* actual pageins couter */
+ natural_t cow_faults; /* copy-on-write faults counter */
+ natural_t messages_sent; /* messages sent counter */
+ natural_t messages_received; /* messages received counter */
};
#define task_lock(task) simple_lock(&(task)->lock)
@@ -162,8 +171,6 @@ extern kern_return_t task_hold(task_t);
extern kern_return_t task_dowait(task_t, boolean_t);
extern kern_return_t task_release(task_t);
-extern task_t kernel_task_create(task_t, vm_size_t);
-
extern task_t kernel_task;
#endif /* _KERN_TASK_H_ */
diff --git a/kern/thread.c b/kern/thread.c
index 1548e143..87be9231 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -45,7 +45,6 @@
#include <kern/eventcount.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
-#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
@@ -54,7 +53,8 @@
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/host.h>
-#include <kern/zalloc.h>
+#include <kern/kalloc.h>
+#include <kern/slab.h>
#include <kern/mach_clock.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_kmsg.h>
@@ -67,7 +67,7 @@
thread_t active_threads[NCPUS];
vm_offset_t active_stacks[NCPUS];
-struct zone *thread_zone;
+struct kmem_cache thread_cache;
queue_head_t reaper_queue;
decl_simple_lock_data(, reaper_lock)
@@ -208,7 +208,7 @@ void stack_alloc(
* addresses of a stack given an address in the middle.
*/
- if (kmem_alloc_aligned(kernel_map, &stack, KERNEL_STACK_SIZE)
+ if (kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE)
!= KERN_SUCCESS)
panic("stack_alloc");
@@ -268,7 +268,7 @@ void stack_collect(void)
#if MACH_DEBUG
stack_finalize(stack);
#endif /* MACH_DEBUG */
- kmem_free(kernel_map, stack, KERNEL_STACK_SIZE);
+ kmem_free(kmem_map, stack, KERNEL_STACK_SIZE);
s = splsched();
stack_lock();
@@ -300,11 +300,8 @@ void stack_privilege(
void thread_init(void)
{
- thread_zone = zinit(
- sizeof(struct thread), 0,
- THREAD_MAX * sizeof(struct thread),
- THREAD_CHUNK * sizeof(struct thread),
- 0, "threads");
+ kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
+ NULL, NULL, NULL, 0);
/*
* Fill in a template thread for fast initialization.
@@ -414,7 +411,7 @@ kern_return_t thread_create(
* Allocate a thread and initialize static fields
*/
- new_thread = (thread_t) zalloc(thread_zone);
+ new_thread = (thread_t) kmem_cache_alloc(&thread_cache);
if (new_thread == THREAD_NULL)
return KERN_RESOURCE_SHORTAGE;
@@ -710,7 +707,7 @@ void thread_deallocate(
evc_notify_abort(thread);
pcb_terminate(thread);
- zfree(thread_zone, (vm_offset_t) thread);
+ kmem_cache_free(&thread_cache, (vm_offset_t) thread);
}
void thread_reference(
@@ -1329,6 +1326,13 @@ kern_return_t thread_suspend(
hold = FALSE;
spl = splsched();
thread_lock(thread);
+ /* Wait for thread to get interruptible */
+ while (thread->state & TH_UNINT) {
+ assert_wait(&thread->state, TRUE);
+ thread_unlock(thread);
+ thread_block(NULL);
+ thread_lock(thread);
+ }
if (thread->user_stop_count++ == 0) {
hold = TRUE;
thread->suspend_count++;
@@ -2395,7 +2399,7 @@ kern_return_t host_stack_usage(
vm_size_t *maxusagep,
vm_offset_t *maxstackp)
{
- unsigned int total;
+ natural_t total;
vm_size_t maxusage;
if (host == HOST_NULL)
diff --git a/kern/zalloc.c b/kern/zalloc.c
deleted file mode 100644
index a95c7f6b..00000000
--- a/kern/zalloc.c
+++ /dev/null
@@ -1,1007 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1993-1987 Carnegie Mellon University.
- * Copyright (c) 1993,1994 The University of Utah and
- * the Computer Systems Laboratory (CSL).
- * All rights reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
- * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
- * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
- * THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- * File: kern/zalloc.c
- * Author: Avadis Tevanian, Jr.
- *
- * Zone-based memory allocator. A zone is a collection of fixed size
- * data blocks for which quick allocation/deallocation is possible.
- */
-
-#include <string.h>
-
-#include <kern/debug.h>
-#include <kern/macro_help.h>
-#include <kern/printf.h>
-#include <kern/mach_clock.h>
-#include <kern/sched.h>
-#include <kern/zalloc.h>
-#include <mach/vm_param.h>
-#include <vm/vm_kern.h>
-#include <machine/machspl.h>
-
-#if MACH_DEBUG
-#include <mach/kern_return.h>
-#include <mach/machine/vm_types.h>
-#include <mach_debug/zone_info.h>
-#include <kern/host.h>
-#include <vm/vm_map.h>
-#include <vm/vm_user.h>
-#include <vm/vm_kern.h>
-#endif
-
-#define ADD_TO_ZONE(zone, element) \
-MACRO_BEGIN \
- *((vm_offset_t *)(element)) = (zone)->free_elements; \
- (zone)->free_elements = (vm_offset_t) (element); \
- zone_count_down(zone); \
-MACRO_END
-
-#define REMOVE_FROM_ZONE(zone, ret, type) \
-MACRO_BEGIN \
- (ret) = (type) (zone)->free_elements; \
- if ((ret) != (type) 0) { \
- zone_count_up(zone); \
- (zone)->free_elements = *((vm_offset_t *)(ret)); \
- } \
-MACRO_END
-
-#define ALIGN_SIZE_UP(size, align) \
-((size) = (((size) + ((align) - 1)) & ~((align) - 1)))
-
-/*
- * Support for garbage collection of unused zone pages:
- */
-
-struct zone_page_table_entry {
- struct zone_page_table_entry *next;
- short in_free_list;
- short alloc_count;
-};
-
-extern struct zone_page_table_entry * zone_page_table;
-extern vm_offset_t zone_map_min_address;
-
-#define lock_zone_page_table() simple_lock(&zone_page_table_lock)
-#define unlock_zone_page_table() simple_unlock(&zone_page_table_lock)
-
-#define zone_page(addr) \
- (&(zone_page_table[(atop(((vm_offset_t)addr) - zone_map_min_address))]))
-
-
-extern void zone_page_alloc(vm_offset_t, vm_size_t);
-extern void zone_page_dealloc(vm_offset_t, vm_size_t);
-extern void zone_page_in_use(vm_offset_t, vm_size_t);
-extern void zone_page_free(vm_offset_t, vm_size_t);
-
-zone_t zone_zone; /* this is the zone containing other zones */
-
-boolean_t zone_ignore_overflow = TRUE;
-
-vm_map_t zone_map = VM_MAP_NULL;
-vm_size_t zone_map_size = 64 * 1024 * 1024;
-
-/*
- * The VM system gives us an initial chunk of memory.
- * It has to be big enough to allocate the zone_zone
- * and some initial kernel data structures, like kernel maps.
- * It is advantageous to make it bigger than really necessary,
- * because this memory is more efficient than normal kernel
- * virtual memory. (It doesn't have vm_page structures backing it
- * and it may have other machine-dependent advantages.)
- * So for best performance, zdata_size should approximate
- * the amount of memory you expect the zone system to consume.
- */
-
-vm_offset_t zdata;
-vm_size_t zdata_size = 420 * 1024;
-
-#define zone_lock(zone) \
-MACRO_BEGIN \
- if (zone->type & ZONE_PAGEABLE) { \
- lock_write(&zone->complex_lock); \
- } else { \
- simple_lock(&zone->lock); \
- } \
-MACRO_END
-
-#define zone_unlock(zone) \
-MACRO_BEGIN \
- if (zone->type & ZONE_PAGEABLE) { \
- lock_done(&zone->complex_lock); \
- } else { \
- simple_unlock(&zone->lock); \
- } \
-MACRO_END
-
-#define zone_lock_init(zone) \
-MACRO_BEGIN \
- if (zone->type & ZONE_PAGEABLE) { \
- lock_init(&zone->complex_lock, TRUE); \
- } else { \
- simple_lock_init(&zone->lock); \
- } \
-MACRO_END
-
-static vm_offset_t zget_space(vm_offset_t size, vm_size_t align);
-
-decl_simple_lock_data(,zget_space_lock)
-vm_offset_t zalloc_next_space;
-vm_offset_t zalloc_end_of_space;
-vm_size_t zalloc_wasted_space;
-
-/*
- * Garbage collection map information
- */
-decl_simple_lock_data(,zone_page_table_lock)
-struct zone_page_table_entry * zone_page_table;
-vm_offset_t zone_map_min_address;
-vm_offset_t zone_map_max_address;
-int zone_pages;
-
-extern void zone_page_init(vm_offset_t, vm_size_t, int);
-
-#define ZONE_PAGE_USED 0
-#define ZONE_PAGE_UNUSED -1
-
-
-/*
- * Protects first_zone, last_zone, num_zones,
- * and the next_zone field of zones.
- */
-decl_simple_lock_data(,all_zones_lock)
-zone_t first_zone;
-zone_t *last_zone;
-int num_zones;
-
-/*
- * zinit initializes a new zone. The zone data structures themselves
- * are stored in a zone, which is initially a static structure that
- * is initialized by zone_init.
- */
-zone_t zinit(size, align, max, alloc, memtype, name)
- vm_size_t size; /* the size of an element */
- vm_size_t align; /* alignment of elements */
- vm_size_t max; /* maximum memory to use */
- vm_size_t alloc; /* allocation size */
- unsigned int memtype; /* flags specifying type of memory */
- char *name; /* a name for the zone */
-{
- register zone_t z;
-
- if (zone_zone == ZONE_NULL)
- z = (zone_t) zget_space(sizeof(struct zone), 0);
- else
- z = (zone_t) zalloc(zone_zone);
- if (z == ZONE_NULL)
- panic("zinit");
- if (alloc == 0)
- alloc = PAGE_SIZE;
-
- if (size == 0)
- size = sizeof(z->free_elements);
- /*
- * Round off all the parameters appropriately.
- */
-
- if ((max = round_page(max)) < (alloc = round_page(alloc)))
- max = alloc;
-
- if (align > 0) {
- if (PAGE_SIZE % align || align % sizeof(z->free_elements))
- panic("zinit");
- ALIGN_SIZE_UP(size, align);
- }
-
- z->free_elements = 0;
- z->cur_size = 0;
- z->max_size = max;
- z->elem_size = ((size-1) + sizeof(z->free_elements)) -
- ((size-1) % sizeof(z->free_elements));
- z->align = align;
-
- z->alloc_size = alloc;
- z->type = memtype;
- z->zone_name = name;
-#ifdef ZONE_COUNT
- z->count = 0;
-#endif
- z->doing_alloc = FALSE;
- zone_lock_init(z);
-
- /*
- * Add the zone to the all-zones list.
- */
-
- z->next_zone = ZONE_NULL;
- simple_lock(&all_zones_lock);
- *last_zone = z;
- last_zone = &z->next_zone;
- num_zones++;
- simple_unlock(&all_zones_lock);
-
- return(z);
-}
-
-/*
- * Cram the given memory into the specified zone.
- */
-void zcram(zone_t zone, vm_offset_t newmem, vm_size_t size)
-{
- register vm_size_t elem_size;
-
- if (newmem == (vm_offset_t) 0) {
- panic("zcram - memory at zero");
- }
- elem_size = zone->elem_size;
-
- zone_lock(zone);
- while (size >= elem_size) {
- ADD_TO_ZONE(zone, newmem);
- zone_page_alloc(newmem, elem_size);
- zone_count_up(zone); /* compensate for ADD_TO_ZONE */
- size -= elem_size;
- newmem += elem_size;
- zone->cur_size += elem_size;
- }
- zone_unlock(zone);
-}
-
-/*
- * Contiguous space allocator for non-paged zones. Allocates "size" amount
- * of memory from zone_map.
- */
-
-static vm_offset_t zget_space(vm_offset_t size, vm_size_t align)
-{
- vm_offset_t new_space = 0;
- vm_offset_t result;
- vm_size_t space_to_add = 0; /*'=0' to quiet gcc warnings */
-
- simple_lock(&zget_space_lock);
- if (align > 0) {
- assert(align < PAGE_SIZE);
- ALIGN_SIZE_UP(zalloc_next_space, align);
- }
-
- while ((zalloc_next_space + size) > zalloc_end_of_space) {
- /*
- * Add at least one page to allocation area.
- */
-
- space_to_add = round_page(size);
-
- if (new_space == 0) {
- /*
- * Memory cannot be wired down while holding
- * any locks that the pageout daemon might
- * need to free up pages. [Making the zget_space
- * lock a complex lock does not help in this
- * regard.]
- *
- * Unlock and allocate memory. Because several
- * threads might try to do this at once, don't
- * use the memory before checking for available
- * space again.
- */
-
- simple_unlock(&zget_space_lock);
-
- if (kmem_alloc_wired(zone_map,
- &new_space, space_to_add)
- != KERN_SUCCESS)
- return(0);
- zone_page_init(new_space, space_to_add,
- ZONE_PAGE_USED);
- simple_lock(&zget_space_lock);
- if (align > 0)
- ALIGN_SIZE_UP(zalloc_next_space, align);
- continue;
- }
-
-
- /*
- * Memory was allocated in a previous iteration.
- *
- * Check whether the new region is contiguous
- * with the old one.
- */
-
- if (new_space != zalloc_end_of_space) {
- /*
- * Throw away the remainder of the
- * old space, and start a new one.
- */
- zalloc_wasted_space +=
- zalloc_end_of_space - zalloc_next_space;
- zalloc_next_space = new_space;
- }
-
- zalloc_end_of_space = new_space + space_to_add;
-
- new_space = 0;
- }
- result = zalloc_next_space;
- zalloc_next_space += size;
- simple_unlock(&zget_space_lock);
-
- if (new_space != 0)
- kmem_free(zone_map, new_space, space_to_add);
-
- return(result);
-}
-
-
-/*
- * Initialize the "zone of zones" which uses fixed memory allocated
- * earlier in memory initialization. zone_bootstrap is called
- * before zone_init.
- */
-void zone_bootstrap(void)
-{
- simple_lock_init(&all_zones_lock);
- first_zone = ZONE_NULL;
- last_zone = &first_zone;
- num_zones = 0;
-
- simple_lock_init(&zget_space_lock);
- zalloc_next_space = zdata;
- zalloc_end_of_space = zdata + zdata_size;
- zalloc_wasted_space = 0;
-
- zone_zone = ZONE_NULL;
- zone_zone = zinit(sizeof(struct zone), 0, 128 * sizeof(struct zone),
- sizeof(struct zone), 0, "zones");
-}
-
-void zone_init(void)
-{
- vm_offset_t zone_min;
- vm_offset_t zone_max;
-
- vm_size_t zone_table_size;
-
- zone_map = kmem_suballoc(kernel_map, &zone_min, &zone_max,
- zone_map_size, FALSE);
-
- /*
- * Setup garbage collection information:
- */
-
- zone_table_size = atop(zone_max - zone_min) *
- sizeof(struct zone_page_table_entry);
- if (kmem_alloc_wired(zone_map, (vm_offset_t *) &zone_page_table,
- zone_table_size) != KERN_SUCCESS)
- panic("zone_init");
- zone_min = (vm_offset_t)zone_page_table + round_page(zone_table_size);
- zone_pages = atop(zone_max - zone_min);
- zone_map_min_address = zone_min;
- zone_map_max_address = zone_max;
- simple_lock_init(&zone_page_table_lock);
- zone_page_init(zone_min, zone_max - zone_min, ZONE_PAGE_UNUSED);
-}
-
-
-/*
- * zalloc returns an element from the specified zone.
- */
-vm_offset_t zalloc(zone_t zone)
-{
- vm_offset_t addr;
-
- if (zone == ZONE_NULL)
- panic ("zalloc: null zone");
-
- check_simple_locks();
-
- zone_lock(zone);
- REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
- while (addr == 0) {
- /*
- * If nothing was there, try to get more
- */
- if (zone->doing_alloc) {
- /*
- * Someone is allocating memory for this zone.
- * Wait for it to show up, then try again.
- */
- assert_wait((event_t)&zone->doing_alloc, TRUE);
- /* XXX say wakeup needed */
- zone_unlock(zone);
- thread_block((void (*)()) 0);
- zone_lock(zone);
- }
- else {
- if ((zone->cur_size + (zone->type & ZONE_PAGEABLE ?
- zone->alloc_size : zone->elem_size)) >
- zone->max_size) {
- if (zone->type & ZONE_EXHAUSTIBLE)
- break;
- /*
- * Printf calls logwakeup, which calls
- * select_wakeup which will do a zfree
- * (which tries to take the select_zone
- * lock... Hang. Release the lock now
- * so it can be taken again later.
- * NOTE: this used to be specific to
- * the select_zone, but for
- * cleanliness, we just unlock all
- * zones before this.
- */
- if (!(zone->type & ZONE_FIXED)) {
- /*
- * We're willing to overflow certain
- * zones, but not without complaining.
- *
- * This is best used in conjunction
- * with the collecatable flag. What we
- * want is an assurance we can get the
- * memory back, assuming there's no
- * leak.
- */
- zone->max_size += (zone->max_size >> 1);
- } else if (!zone_ignore_overflow) {
- zone_unlock(zone);
- printf("zone \"%s\" empty.\n",
- zone->zone_name);
- panic("zalloc: zone %s exhausted",
- zone->zone_name);
- }
- }
-
- if (zone->type & ZONE_PAGEABLE)
- zone->doing_alloc = TRUE;
- zone_unlock(zone);
-
- if (zone->type & ZONE_PAGEABLE) {
- if (kmem_alloc_pageable(zone_map, &addr,
- zone->alloc_size)
- != KERN_SUCCESS)
- panic("zalloc: zone %s exhausted",
- zone->zone_name);
- zcram(zone, addr, zone->alloc_size);
- zone_lock(zone);
- zone->doing_alloc = FALSE;
- /* XXX check before doing this */
- thread_wakeup((event_t)&zone->doing_alloc);
-
- REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
- } else if (zone->type & ZONE_COLLECTABLE) {
- if (kmem_alloc_wired(zone_map,
- &addr, zone->alloc_size)
- != KERN_SUCCESS)
- panic("zalloc: zone %s exhausted",
- zone->zone_name);
- zone_page_init(addr, zone->alloc_size,
- ZONE_PAGE_USED);
- zcram(zone, addr, zone->alloc_size);
- zone_lock(zone);
- REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
- } else {
- addr = zget_space(zone->elem_size, zone->align);
- if (addr == 0)
- panic("zalloc: zone %s exhausted",
- zone->zone_name);
-
- zone_lock(zone);
- zone_count_up(zone);
- zone->cur_size += zone->elem_size;
- zone_unlock(zone);
- zone_page_alloc(addr, zone->elem_size);
- return(addr);
- }
- }
- }
-
- zone_unlock(zone);
- return(addr);
-}
-
-
-/*
- * zget returns an element from the specified zone
- * and immediately returns nothing if there is nothing there.
- *
- * This form should be used when you can not block (like when
- * processing an interrupt).
- */
-vm_offset_t zget(zone_t zone)
-{
- register vm_offset_t addr;
-
- if (zone == ZONE_NULL)
- panic ("zalloc: null zone");
-
- zone_lock(zone);
- REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
- zone_unlock(zone);
-
- return(addr);
-}
-
-boolean_t zone_check = FALSE;
-
-void zfree(zone_t zone, vm_offset_t elem)
-{
- zone_lock(zone);
- if (zone_check) {
- vm_offset_t this;
-
- /* check the zone's consistency */
-
- for (this = zone->free_elements;
- this != 0;
- this = * (vm_offset_t *) this)
- if (this == elem)
- panic("zfree");
- }
- ADD_TO_ZONE(zone, elem);
- zone_unlock(zone);
-}
-
-/*
- * Zone garbage collection subroutines
- *
- * These routines have in common the modification of entries in the
- * zone_page_table. The latter contains one entry for every page
- * in the zone_map.
- *
- * For each page table entry in the given range:
- *
- * zone_page_in_use - decrements in_free_list
- * zone_page_free - increments in_free_list
- * zone_page_init - initializes in_free_list and alloc_count
- * zone_page_alloc - increments alloc_count
- * zone_page_dealloc - decrements alloc_count
- * zone_add_free_page_list - adds the page to the free list
- *
- * Two counts are maintained for each page, the in_free_list count and
- * alloc_count. The alloc_count is how many zone elements have been
- * allocated from a page. (Note that the page could contain elements
- * that span page boundaries. The count includes these elements so
- * one element may be counted in two pages.) In_free_list is a count
- * of how many zone elements are currently free. If in_free_list is
- * equal to alloc_count then the page is eligible for garbage
- * collection.
- *
- * Alloc_count and in_free_list are initialized to the correct values
- * for a particular zone when a page is zcram'ed into a zone. Subsequent
- * gets and frees of zone elements will call zone_page_in_use and
- * zone_page_free which modify the in_free_list count. When the zones
- * garbage collector runs it will walk through a zones free element list,
- * remove the elements that reside on collectable pages, and use
- * zone_add_free_page_list to create a list of pages to be collected.
- */
-
-void zone_page_in_use(addr, size)
-vm_offset_t addr;
-vm_size_t size;
-{
- int i, j;
- if ((addr < zone_map_min_address) ||
- (addr+size > zone_map_max_address)) return;
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
- lock_zone_page_table();
- for (; i <= j; i++) {
- zone_page_table[i].in_free_list--;
- }
- unlock_zone_page_table();
-}
-
-void zone_page_free(addr, size)
-vm_offset_t addr;
-vm_size_t size;
-{
- int i, j;
- if ((addr < zone_map_min_address) ||
- (addr+size > zone_map_max_address)) return;
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
- lock_zone_page_table();
- for (; i <= j; i++) {
- /* Set in_free_list to (ZONE_PAGE_USED + 1) if
- * it was previously set to ZONE_PAGE_UNUSED.
- */
- if (zone_page_table[i].in_free_list == ZONE_PAGE_UNUSED) {
- zone_page_table[i].in_free_list = 1;
- } else {
- zone_page_table[i].in_free_list++;
- }
- }
- unlock_zone_page_table();
-}
-
-void zone_page_init(addr, size, value)
-
-vm_offset_t addr;
-vm_size_t size;
-int value;
-{
- int i, j;
- if ((addr < zone_map_min_address) ||
- (addr+size > zone_map_max_address)) return;
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
- lock_zone_page_table();
- for (; i <= j; i++) {
- zone_page_table[i].alloc_count = value;
- zone_page_table[i].in_free_list = 0;
- }
- unlock_zone_page_table();
-}
-
-void zone_page_alloc(addr, size)
-vm_offset_t addr;
-vm_size_t size;
-{
- int i, j;
- if ((addr < zone_map_min_address) ||
- (addr+size > zone_map_max_address)) return;
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
- lock_zone_page_table();
- for (; i <= j; i++) {
- /* Set alloc_count to (ZONE_PAGE_USED + 1) if
- * it was previously set to ZONE_PAGE_UNUSED.
- */
- if (zone_page_table[i].alloc_count == ZONE_PAGE_UNUSED) {
- zone_page_table[i].alloc_count = 1;
- } else {
- zone_page_table[i].alloc_count++;
- }
- }
- unlock_zone_page_table();
-}
-
-void zone_page_dealloc(addr, size)
-vm_offset_t addr;
-vm_size_t size;
-{
- int i, j;
- if ((addr < zone_map_min_address) ||
- (addr+size > zone_map_max_address)) return;
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
- lock_zone_page_table();
- for (; i <= j; i++) {
- zone_page_table[i].alloc_count--;
- }
- unlock_zone_page_table();
-}
-
-void
-zone_add_free_page_list(free_list, addr, size)
- struct zone_page_table_entry **free_list;
- vm_offset_t addr;
- vm_size_t size;
-{
- int i, j;
- if ((addr < zone_map_min_address) ||
- (addr+size > zone_map_max_address)) return;
- i = atop(addr-zone_map_min_address);
- j = atop((addr+size-1) - zone_map_min_address);
- lock_zone_page_table();
- for (; i <= j; i++) {
- if (zone_page_table[i].alloc_count == 0) {
- zone_page_table[i].next = *free_list;
- *free_list = &zone_page_table[i];
- zone_page_table[i].alloc_count = ZONE_PAGE_UNUSED;
- zone_page_table[i].in_free_list = 0;
- }
- }
- unlock_zone_page_table();
-}
-
-
-/* This is used for walking through a zone's free element list.
- */
-struct zone_free_entry {
- struct zone_free_entry * next;
-};
-
-
-/* Zone garbage collection
- *
- * zone_gc will walk through all the free elements in all the
- * zones that are marked collectable looking for reclaimable
- * pages. zone_gc is called by consider_zone_gc when the system
- * begins to run out of memory.
- */
-static void zone_gc(void)
-{
- int max_zones;
- zone_t z;
- int i;
- register spl_t s;
- struct zone_page_table_entry *freep;
- struct zone_page_table_entry *zone_free_page_list;
-
- simple_lock(&all_zones_lock);
- max_zones = num_zones;
- z = first_zone;
- simple_unlock(&all_zones_lock);
-
- zone_free_page_list = (struct zone_page_table_entry *) 0;
-
- for (i = 0; i < max_zones; i++) {
- struct zone_free_entry * last;
- struct zone_free_entry * elt;
- assert(z != ZONE_NULL);
- /* run this at splhigh so that interupt routines that use zones
- can not interupt while their zone is locked */
- s=splhigh();
- zone_lock(z);
-
- if ((z->type & (ZONE_PAGEABLE|ZONE_COLLECTABLE)) == ZONE_COLLECTABLE) {
-
- /* Count the free elements in each page. This loop
- * requires that all in_free_list entries are zero.
- */
- elt = (struct zone_free_entry *)(z->free_elements);
- while ((elt != (struct zone_free_entry *)0)) {
- zone_page_free((vm_offset_t)elt, z->elem_size);
- elt = elt->next;
- }
-
- /* Now determine which elements should be removed
- * from the free list and, after all the elements
- * on a page have been removed, add the element's
- * page to a list of pages to be freed.
- */
- elt = (struct zone_free_entry *)(z->free_elements);
- last = elt;
- while ((elt != (struct zone_free_entry *)0)) {
- if (((vm_offset_t)elt>=zone_map_min_address)&&
- ((vm_offset_t)elt<=zone_map_max_address)&&
- (zone_page(elt)->in_free_list ==
- zone_page(elt)->alloc_count)) {
-
- z->cur_size -= z->elem_size;
- zone_page_in_use((vm_offset_t)elt, z->elem_size);
- zone_page_dealloc((vm_offset_t)elt, z->elem_size);
- if (zone_page(elt)->alloc_count == 0 ||
- zone_page(elt+(z->elem_size-1))->alloc_count==0) {
- zone_add_free_page_list(
- &zone_free_page_list,
- (vm_offset_t)elt, z->elem_size);
- }
-
-
- if (elt == last) {
- elt = elt->next;
- z->free_elements =(vm_offset_t)elt;
- last = elt;
- } else {
- last->next = elt->next;
- elt = elt->next;
- }
- } else {
- /* This element is not eligible for collection
- * so clear in_free_list in preparation for a
- * subsequent garbage collection pass.
- */
- if (((vm_offset_t)elt>=zone_map_min_address)&&
- ((vm_offset_t)elt<=zone_map_max_address)) {
- zone_page(elt)->in_free_list = 0;
- }
- last = elt;
- elt = elt->next;
- }
- }
- }
- zone_unlock(z);
- splx(s);
- simple_lock(&all_zones_lock);
- z = z->next_zone;
- simple_unlock(&all_zones_lock);
- }
-
- for (freep = zone_free_page_list; freep != 0; freep = freep->next) {
- vm_offset_t free_addr;
-
- free_addr = zone_map_min_address +
- PAGE_SIZE * (freep - zone_page_table);
-
- /* Hack Hack */
- /* Needed to make vm_map_delete's vm_map_clip_end always be
- * able to get an element without having to call zget_space and
- * hang because zone_map is already locked by vm_map_delete */
-
- extern zone_t vm_map_kentry_zone; /* zone for kernel entry structures */
- vm_offset_t entry1 = zalloc(vm_map_kentry_zone),
- entry2 = zalloc(vm_map_kentry_zone);
- zfree(vm_map_kentry_zone, entry1);
- zfree(vm_map_kentry_zone, entry2);
-
- kmem_free(zone_map, free_addr, PAGE_SIZE);
- }
-}
-
-boolean_t zone_gc_allowed = TRUE;
-unsigned zone_gc_last_tick = 0;
-unsigned zone_gc_max_rate = 0; /* in ticks */
-
-/*
- * consider_zone_gc:
- *
- * Called by the pageout daemon when the system needs more free pages.
- */
-
-void
-consider_zone_gc(void)
-{
- /*
- * By default, don't attempt zone GC more frequently
- * than once a second.
- */
-
- if (zone_gc_max_rate == 0)
- zone_gc_max_rate = hz;
-
- if (zone_gc_allowed &&
- (sched_tick > (zone_gc_last_tick + zone_gc_max_rate))) {
- zone_gc_last_tick = sched_tick;
- zone_gc();
- }
-}
-
-#if MACH_DEBUG
-kern_return_t host_zone_info(host, namesp, namesCntp, infop, infoCntp)
- host_t host;
- zone_name_array_t *namesp;
- unsigned int *namesCntp;
- zone_info_array_t *infop;
- unsigned int *infoCntp;
-{
- zone_name_t *names;
- vm_offset_t names_addr;
- vm_size_t names_size = 0; /*'=0' to quiet gcc warnings */
- zone_info_t *info;
- vm_offset_t info_addr;
- vm_size_t info_size = 0; /*'=0' to quiet gcc warnings */
- unsigned int max_zones, i;
- zone_t z;
- kern_return_t kr;
-
- if (host == HOST_NULL)
- return KERN_INVALID_HOST;
-
- /*
- * We assume that zones aren't freed once allocated.
- * We won't pick up any zones that are allocated later.
- */
-
- simple_lock(&all_zones_lock);
- max_zones = num_zones;
- z = first_zone;
- simple_unlock(&all_zones_lock);
-
- if (max_zones <= *namesCntp) {
- /* use in-line memory */
-
- names = *namesp;
- } else {
- names_size = round_page(max_zones * sizeof *names);
- kr = kmem_alloc_pageable(ipc_kernel_map,
- &names_addr, names_size);
- if (kr != KERN_SUCCESS)
- return kr;
-
- names = (zone_name_t *) names_addr;
- }
-
- if (max_zones <= *infoCntp) {
- /* use in-line memory */
-
- info = *infop;
- } else {
- info_size = round_page(max_zones * sizeof *info);
- kr = kmem_alloc_pageable(ipc_kernel_map,
- &info_addr, info_size);
- if (kr != KERN_SUCCESS) {
- if (names != *namesp)
- kmem_free(ipc_kernel_map,
- names_addr, names_size);
- return kr;
- }
-
- info = (zone_info_t *) info_addr;
- }
-
- for (i = 0; i < max_zones; i++) {
- zone_name_t *zn = &names[i];
- zone_info_t *zi = &info[i];
- struct zone zcopy;
-
- assert(z != ZONE_NULL);
-
- zone_lock(z);
- zcopy = *z;
- zone_unlock(z);
-
- simple_lock(&all_zones_lock);
- z = z->next_zone;
- simple_unlock(&all_zones_lock);
-
- /* assuming here the name data is static */
- (void) strncpy(zn->zn_name, zcopy.zone_name,
- sizeof zn->zn_name);
-
-#ifdef ZONE_COUNT
- zi->zi_count = zcopy.count;
-#else
- zi->zi_count = 0;
-#endif
- zi->zi_cur_size = zcopy.cur_size;
- zi->zi_max_size = zcopy.max_size;
- zi->zi_elem_size = zcopy.elem_size;
- zi->zi_alloc_size = zcopy.alloc_size;
- zi->zi_pageable = (zcopy.type & ZONE_PAGEABLE) != 0;
- zi->zi_exhaustible = (zcopy.type & ZONE_EXHAUSTIBLE) != 0;
- zi->zi_collectable = (zcopy.type & ZONE_COLLECTABLE) != 0;
- }
-
- if (names != *namesp) {
- vm_size_t used;
- vm_map_copy_t copy;
-
- used = max_zones * sizeof *names;
-
- if (used != names_size)
- memset((char *) (names_addr + used), 0, names_size - used);
-
- kr = vm_map_copyin(ipc_kernel_map, names_addr, names_size,
- TRUE, &copy);
- assert(kr == KERN_SUCCESS);
-
- *namesp = (zone_name_t *) copy;
- }
- *namesCntp = max_zones;
-
- if (info != *infop) {
- vm_size_t used;
- vm_map_copy_t copy;
-
- used = max_zones * sizeof *info;
-
- if (used != info_size)
- memset((char *) (info_addr + used), 0, info_size - used);
-
- kr = vm_map_copyin(ipc_kernel_map, info_addr, info_size,
- TRUE, &copy);
- assert(kr == KERN_SUCCESS);
-
- *infop = (zone_info_t *) copy;
- }
- *infoCntp = max_zones;
-
- return KERN_SUCCESS;
-}
-#endif /* MACH_DEBUG */
diff --git a/kern/zalloc.h b/kern/zalloc.h
deleted file mode 100644
index f1a1850b..00000000
--- a/kern/zalloc.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
- * Copyright (c) 1993,1994 The University of Utah and
- * the Computer Systems Laboratory (CSL).
- * All rights reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
- * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
- * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
- * THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- * File: zalloc.h
- * Author: Avadis Tevanian, Jr.
- * Date: 1985
- *
- */
-
-#ifndef _KERN_ZALLOC_H_
-#define _KERN_ZALLOC_H_
-
-#include <mach/machine/vm_types.h>
-#include <kern/macro_help.h>
-#include <kern/lock.h>
-#include <kern/queue.h>
-#include <machine/zalloc.h>
-
-/*
- * A zone is a collection of fixed size blocks for which there
- * is fast allocation/deallocation access. Kernel routines can
- * use zones to manage data structures dynamically, creating a zone
- * for each type of data structure to be managed.
- *
- */
-
-struct zone {
- decl_simple_lock_data(,lock) /* generic lock */
-#ifdef ZONE_COUNT
- int count; /* Number of elements used now */
-#endif
- vm_offset_t free_elements;
- vm_size_t cur_size; /* current memory utilization */
- vm_size_t max_size; /* how large can this zone grow */
- vm_size_t elem_size; /* size of an element */
- vm_size_t align; /* alignment of elements */
- vm_size_t alloc_size; /* size used for more memory */
- boolean_t doing_alloc; /* is zone expanding now? */
- char *zone_name; /* a name for the zone */
- unsigned int type; /* type of memory */
- lock_data_t complex_lock; /* Lock for pageable zones */
- struct zone *next_zone; /* Link for all-zones list */
-};
-typedef struct zone *zone_t;
-
-#define ZONE_NULL ((zone_t) 0)
-
-/* Exported to everyone */
-zone_t zinit(vm_size_t size, vm_size_t align, vm_size_t max,
- vm_size_t alloc, unsigned int memtype, char *name);
-vm_offset_t zalloc(zone_t zone);
-vm_offset_t zget(zone_t zone);
-void zfree(zone_t zone, vm_offset_t elem);
-void zcram(zone_t zone, vm_offset_t newmem, vm_size_t size);
-
-/* Exported only to vm/vm_init.c */
-void zone_bootstrap(void);
-void zone_init(void);
-
-/* Exported only to vm/vm_pageout.c */
-void consider_zone_gc(void);
-
-
-/* Memory type bits for zones */
-#define ZONE_PAGEABLE 0x00000001
-#define ZONE_COLLECTABLE 0x00000002 /* Garbage-collect this zone when memory runs low */
-#define ZONE_EXHAUSTIBLE 0x00000004 /* zalloc() on this zone is allowed to fail */
-#define ZONE_FIXED 0x00000008 /* Panic if zone is exhausted (XXX) */
-
-/* Machine-dependent code can provide additional memory types. */
-#define ZONE_MACHINE_TYPES 0xffff0000
-
-
-#ifdef ZONE_COUNT
-#define zone_count_up(zone) ((zone)->count++)
-#define zone_count_down(zone) ((zone)->count--)
-#else
-#define zone_count_up(zone)
-#define zone_count_down(zone)
-#endif
-
-
-
-/* These quick inline versions only work for small, nonpageable zones (currently). */
-
-static __inline vm_offset_t ZALLOC(zone_t zone)
-{
- simple_lock(&zone->lock);
- if (zone->free_elements == 0) {
- simple_unlock(&zone->lock);
- return zalloc(zone);
- } else {
- vm_offset_t element = zone->free_elements;
- zone->free_elements = *((vm_offset_t *)(element));
- zone_count_up(zone);
- simple_unlock(&zone->lock);
- return element;
- }
-}
-
-static __inline void ZFREE(zone_t zone, vm_offset_t element)
-{
- *((vm_offset_t *)(element)) = zone->free_elements;
- zone->free_elements = (vm_offset_t) (element);
- zone_count_down(zone);
-}
-
-
-
-#endif /* _KERN_ZALLOC_H_ */
diff --git a/linux/Makefrag.am b/linux/Makefrag.am
index fccb8070..7c7b432d 100644
--- a/linux/Makefrag.am
+++ b/linux/Makefrag.am
@@ -1,6 +1,6 @@
# Makefile fragment for Linux device drivers and the glue code.
-# Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+# Copyright (C) 2006, 2007, 2011 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
@@ -57,6 +57,7 @@ liblinux_a_SOURCES = \
linux/dev/kernel/sched.c \
linux/dev/glue/kmem.c \
linux/dev/glue/block.c \
+ linux/dev/glue/glue.h \
linux/dev/arch/i386/kernel/setup.c
liblinux_a_SOURCES += \
@@ -259,6 +260,14 @@ liblinux_a_SOURCES += \
linux/src/drivers/scsi/seagate.h
endif
+if device_driver_sym53c8xx
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/sym53c8xx.c \
+ linux/src/drivers/scsi/sym53c8xx_comm.h \
+ linux/src/drivers/scsi/sym53c8xx.h \
+ linux/src/drivers/scsi/sym53c8xx_defs.h
+endif
+
if device_driver_t128
liblinux_a_SOURCES += \
linux/src/drivers/scsi/t128.c \
diff --git a/linux/configfrag.ac b/linux/configfrag.ac
index cf671fcc..882af6bc 100644
--- a/linux/configfrag.ac
+++ b/linux/configfrag.ac
@@ -50,6 +50,11 @@ dnl USE OF THIS SOFTWARE.
esac
}]
+AC_DEFUN([AC_OPTION_Linux_group], [
+AC_ARG_ENABLE([$1-group],
+ AS_HELP_STRING([--enable-$1-group], [$2]))
+])
+
#
# AC_OPTION_Linux_ix86_at(name,description,option[,class]). Process
# configuration option --enable-`name' (with description `description'). If
@@ -62,7 +67,11 @@ AC_DEFUN([AC_OPTION_Linux_ix86_at], [
[unset enableval]
AC_ARG_ENABLE([$1],
AS_HELP_STRING([--enable-$1], [$2]))
-[#TODO. Could use some M4 magic to avoid a lot of shell code.
+[if test x$enable_$4_group = xno;
+then
+ enableval=${enableval-no}
+fi
+#TODO. Could use some M4 magic to avoid a lot of shell code.
case $host_platform:$host_cpu in
at:i?86)
case $enable_device_drivers:'$2' in
@@ -140,9 +149,16 @@ AC_Linux_DRIVER_qemu([ide],
[IDE disk controllers],
[CONFIG_BLK_DEV_IDE])
+AC_ARG_ENABLE([ide-forcedma],
+ AS_HELP_STRING([--enable-ide-forcedma], [enable forced use of DMA on IDE]),
+ [test x"$enableval" = xno ||
+ AC_DEFINE([CONFIG_BLK_DEV_FORCE_DMA], [1], [Force DMA on IDE block devices])])
+
dnl SCSI controllers.
+AC_OPTION_Linux_group([scsi], [SCSI drivers])
-AC_Linux_DRIVER([53c78xx],
+# Disabled by default.
+AC_Linux_DRIVER_nodef([53c78xx],
[SCSI controller NCR 53C7,8xx],
[CONFIG_SCSI_NCR53C7xx],
[scsi])
@@ -181,7 +197,8 @@ AC_Linux_DRIVER([aha1740],
[SCSI controller Adaptec AHA-1740],
[CONFIG_SCSI_AHA1740],
[scsi])
-AC_Linux_DRIVER([aic7xxx],
+# Disabled by default.
+AC_Linux_DRIVER_nodef([aic7xxx],
[SCSI controller Adaptec AIC7xxx],
[CONFIG_SCSI_AIC7XXX],
[scsi])
@@ -212,7 +229,8 @@ AC_Linux_DRIVER_nodef([g_NCR5380],
[SCSI controller Generic NCR5380/53c400 (ncr5380, ncr53c400)],
[CONFIG_SCSI_GENERIC_NCR5380],
[scsi])
-AC_Linux_DRIVER([gdth],
+# Disabled by default.
+AC_Linux_DRIVER_nodef([gdth],
[GDT SCSI Disk Array Controller],
[CONFIG_SCSI_GDTH],
[scsi])
@@ -244,6 +262,10 @@ AC_Linux_DRIVER([seagate],
[SCSI controller Seagate ST02, Future Domain TMC-8xx],
[CONFIG_SCSI_SEAGATE],
[scsi])
+AC_Linux_DRIVER([sym53c8xx],
+ [SCSI controller Symbios 53C8XX],
+ [CONFIG_SCSI_SYM53C8XX],
+ [scsi])
AC_Linux_DRIVER([t128],
[SCSI controller Trantor T128/T128F/T228 (t128, t128f, t228)],
[CONFIG_SCSI_T128],
@@ -266,6 +288,7 @@ AC_Linux_DRIVER([wd7000],
[scsi])
dnl Ethernet controllers.
+AC_OPTION_Linux_group([net], [Network drivers])
AC_Linux_DRIVER([3c501],
[Ethernet controller 3COM 501 (3c501) / Etherlink I],
@@ -490,6 +513,7 @@ AC_Linux_DRIVER([znet],
[net])
dnl PCMCIA device support.
+AC_OPTION_Linux_group([pcmcia], [PCMCIA drivers])
AC_Linux_DRIVER([i82365],
[Intel 82365 PC Card controller],
@@ -537,6 +561,7 @@ AC_Linux_DRIVER([xirc2ps_cs],
[pcmcia])
dnl Wireless device drivers.
+AC_OPTION_Linux_group([wireless], [Wireless drivers])
AC_Linux_DRIVER([orinoco_cs],
[Hermes or Prism 2 PCMCIA Wireless adapters (Orinoco)],
diff --git a/linux/dev/arch/i386/kernel/irq.c b/linux/dev/arch/i386/kernel/irq.c
index 7faaa62c..68bf0c4b 100644
--- a/linux/dev/arch/i386/kernel/irq.c
+++ b/linux/dev/arch/i386/kernel/irq.c
@@ -47,10 +47,8 @@
#include <asm/io.h>
#include <asm/hardirq.h>
-extern void linux_timer_intr (void);
-extern spl_t splhigh (void);
-extern spl_t spl0 (void);
-extern void form_pic_mask (void);
+#include <linux/dev/glue/glue.h>
+#include <machine/machspl.h>
#if 0
/* XXX: This is the way it's done in linux 2.2. GNU Mach currently uses intr_count. It should be made using local_{bh/irq}_count instead (through hardirq_enter/exit) for SMP support. */
@@ -74,7 +72,7 @@ spl_t linux_intr_pri;
/*
* Flag indicating an interrupt is being handled.
*/
-unsigned long intr_count = 0;
+unsigned int intr_count = 0;
/*
* List of Linux interrupt handlers.
@@ -95,12 +93,6 @@ static struct linux_action *irq_action[16] =
NULL, NULL, NULL, NULL
};
-extern spl_t curr_ipl;
-extern int curr_pic_mask;
-extern int pic_mask[];
-
-extern void intnull (), prtnull ();
-
/*
* Generic interrupt handler for Linux devices.
* Set up a fake `struct pt_regs' then call the real handler.
@@ -223,15 +215,15 @@ setup_x86_irq (int irq, struct linux_action *new)
{
/* Can't share interrupts unless both agree to */
if (!(old->flags & new->flags & SA_SHIRQ))
- return (-LINUX_EBUSY);
+ return (-EBUSY);
/* Can't share interrupts unless both are same type */
if ((old->flags ^ new->flags) & SA_INTERRUPT)
- return (-LINUX_EBUSY);
+ return (-EBUSY);
/* Can't share at different levels */
if (intpri[irq] && linux_intr_pri != intpri[irq])
- return (-LINUX_EBUSY);
+ return (-EBUSY);
/* add new interrupt at end of irq queue */
do
@@ -271,7 +263,7 @@ request_irq (unsigned int irq, void (*handler) (int, void *, struct pt_regs *),
assert (irq < 16);
if (!handler)
- return -LINUX_EINVAL;
+ return -EINVAL;
/*
* Hmm... Should I use `kalloc()' ?
@@ -280,7 +272,7 @@ request_irq (unsigned int irq, void (*handler) (int, void *, struct pt_regs *),
action = (struct linux_action *)
linux_kmalloc (sizeof (struct linux_action), GFP_KERNEL);
if (action == NULL)
- return -LINUX_ENOMEM;
+ return -ENOMEM;
action->handler = handler;
action->next = NULL;
@@ -446,7 +438,6 @@ static void show(char * str)
int i;
unsigned long *stack;
int cpu = smp_processor_id();
- extern char *get_options(char *str, int *ints);
printk("\n%s, CPU %d:\n", str, cpu);
printk("irq: %d [%d %d]\n",
diff --git a/linux/dev/drivers/block/floppy.c b/linux/dev/drivers/block/floppy.c
index d3fcd6af..4c0977a3 100644
--- a/linux/dev/drivers/block/floppy.c
+++ b/linux/dev/drivers/block/floppy.c
@@ -177,6 +177,8 @@ static inline int __get_order(unsigned long size);
#include <linux/blk.h>
#include <linux/cdrom.h> /* for the compatibility eject ioctl */
+#include <linux/dev/glue/glue.h>
+
#ifndef FLOPPY_MOTOR_MASK
#define FLOPPY_MOTOR_MASK 0xf0
@@ -437,7 +439,6 @@ static int probing = 0;
static volatile int command_status = FD_COMMAND_NONE, fdc_busy = 0;
static struct wait_queue *fdc_wait = NULL, *command_done = NULL;
#ifdef MACH
-extern int issig (void);
#define NO_SIGNAL (! issig () || ! interruptible)
#else
#define NO_SIGNAL (!(current->signal & ~current->blocked) || !interruptible)
@@ -1412,7 +1413,7 @@ static int interpret_errors(void)
*/
static void setup_rw_floppy(void)
{
- int i,ready_date,r, flags,dflags;
+ int i, ready_date, r, flags;
timeout_fn function;
flags = raw_cmd->flags;
@@ -1435,7 +1436,6 @@ static void setup_rw_floppy(void)
if (wait_for_completion(ready_date,function))
return;
}
- dflags = DRS->flags;
if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
setup_DMA();
@@ -3492,6 +3492,8 @@ static void config_types(void)
printk("fd%d is unknown type %d",drive,
UDP->cmos);
}
+ else
+ allowed_drive_mask &= ~(1 << drive);
}
if (!first)
printk("\n");
@@ -4172,8 +4174,6 @@ static void floppy_release_irq_and_dma(void)
#ifdef MODULE
-extern char *get_options(char *str, int *ints);
-
char *floppy=NULL;
static void parse_floppy_cfg_string(char *cfg)
@@ -4277,11 +4277,10 @@ void cleanup_module(void)
* resource contention. */
void floppy_eject(void)
{
- int dummy;
if(floppy_grab_irq_and_dma()==0)
{
lock_fdc(MAXTIMEOUT,0);
- dummy=fd_eject(0);
+ fd_eject(0);
process_fd_request();
floppy_release_irq_and_dma();
}
diff --git a/linux/dev/drivers/block/genhd.c b/linux/dev/drivers/block/genhd.c
index 3946b39c..95b499b1 100644
--- a/linux/dev/drivers/block/genhd.c
+++ b/linux/dev/drivers/block/genhd.c
@@ -39,6 +39,7 @@
#ifdef MACH
#include <machine/spl.h>
+#include <linux/dev/glue/glue.h>
#endif
#define SYS_IND(p) get_unaligned(&p->sys_ind)
@@ -289,7 +290,7 @@ read_mbr:
printk(" unable to read partition table\n");
return -1;
}
- data = bh->b_data;
+ data = (unsigned char *)bh->b_data;
/* In some cases we modify the geometry */
/* of the drive (below), so ensure that */
/* nobody else tries to re-use this data. */
@@ -770,9 +771,7 @@ void device_setup(void)
struct gendisk *p;
int nr=0;
#ifdef MACH
- extern int linux_intr_pri;
-
- linux_intr_pri = SPL5;
+ linux_intr_pri = SPL6;
#endif
#ifndef MACH
diff --git a/linux/dev/glue/block.c b/linux/dev/glue/block.c
index b920fb62..8c41f088 100644
--- a/linux/dev/glue/block.c
+++ b/linux/dev/glue/block.c
@@ -49,6 +49,8 @@
#include <mach/vm_param.h>
#include <mach/notify.h>
+#include <kern/kalloc.h>
+
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
@@ -61,6 +63,7 @@
#include <device/disk_status.h>
#include <device/device_reply.user.h>
#include <device/device_emul.h>
+#include <device/ds_routines.h>
/* TODO. This should be fixed to not be i386 specific. */
#include <i386at/disk.h>
@@ -78,9 +81,7 @@
#include <linux/hdreg.h>
#include <asm/io.h>
-extern int linux_auto_config;
-extern int linux_intr_pri;
-extern int linux_to_mach_error (int);
+#include <linux/dev/glue/glue.h>
/* This task queue is not used in Mach: just for fixing undefined symbols. */
DECLARE_TASK_QUEUE (tq_disk);
@@ -193,9 +194,6 @@ int read_ahead[MAX_BLKDEV] = {0, };
This is unused in Mach. It is here to make drivers compile. */
struct wait_queue *wait_for_request = NULL;
-/* Map for allocating device memory. */
-extern vm_map_t device_io_map;
-
/* Initialize block drivers. */
int
blk_dev_init ()
@@ -230,19 +228,17 @@ int
register_blkdev (unsigned major, const char *name,
struct file_operations *fops)
{
- int err = 0;
-
if (major == 0)
{
for (major = MAX_BLKDEV - 1; major > 0; major--)
if (blkdevs[major].fops == NULL)
goto out;
- return -LINUX_EBUSY;
+ return -EBUSY;
}
if (major >= MAX_BLKDEV)
- return -LINUX_EINVAL;
+ return -EINVAL;
if (blkdevs[major].fops && blkdevs[major].fops != fops)
- return -LINUX_EBUSY;
+ return -EBUSY;
out:
blkdevs[major].name = name;
@@ -260,12 +256,10 @@ out:
int
unregister_blkdev (unsigned major, const char *name)
{
- int err;
-
if (major >= MAX_BLKDEV)
- return -LINUX_EINVAL;
+ return -EINVAL;
if (! blkdevs[major].fops || strcmp (blkdevs[major].name, name))
- return -LINUX_EINVAL;
+ return -EINVAL;
blkdevs[major].fops = NULL;
if (blkdevs[major].labels)
{
@@ -280,8 +274,6 @@ unregister_blkdev (unsigned major, const char *name)
void
set_blocksize (kdev_t dev, int size)
{
- extern int *blksize_size[];
-
if (! blksize_size[MAJOR (dev)])
return;
@@ -315,7 +307,7 @@ alloc_buffer (int size)
d = current_thread ()->pcb->data;
assert (d);
queue_enter (&d->pages, m, vm_page_t, pageq);
- return (void *) m->phys_addr;
+ return (void *) phystokv(m->phys_addr);
}
return (void *) __get_free_pages (GFP_KERNEL, 0, ~0UL);
}
@@ -324,7 +316,6 @@ alloc_buffer (int size)
static void
free_buffer (void *p, int size)
{
- int i;
struct temp_data *d;
vm_page_t m;
@@ -336,7 +327,7 @@ free_buffer (void *p, int size)
assert (d);
queue_iterate (&d->pages, m, vm_page_t, pageq)
{
- if (m->phys_addr == (vm_offset_t) p)
+ if (phystokv(m->phys_addr) == (vm_offset_t) p)
{
queue_remove (&d->pages, m, vm_page_t, pageq);
VM_PAGE_FREE (m);
@@ -388,7 +379,6 @@ __brelse (struct buffer_head *bh)
struct buffer_head *
bread (kdev_t dev, int block, int size)
{
- int err;
struct buffer_head *bh;
bh = getblk (dev, block, size);
@@ -537,7 +527,7 @@ rdwr_partial (int rw, kdev_t dev, loff_t *off,
}
bh->b_data = alloc_buffer (bh->b_size);
if (! bh->b_data)
- return -LINUX_ENOMEM;
+ return -ENOMEM;
ll_rw_block (READ, 1, &bh);
wait_on_buffer (bh);
if (buffer_uptodate (bh))
@@ -556,7 +546,7 @@ rdwr_partial (int rw, kdev_t dev, loff_t *off,
wait_on_buffer (bh);
if (! buffer_uptodate (bh))
{
- err = -LINUX_EIO;
+ err = -EIO;
goto out;
}
}
@@ -565,7 +555,7 @@ rdwr_partial (int rw, kdev_t dev, loff_t *off,
*off += c;
}
else
- err = -LINUX_EIO;
+ err = -EIO;
out:
free_buffer (bh->b_data, bh->b_size);
return err;
@@ -610,15 +600,15 @@ rdwr_full (int rw, kdev_t dev, loff_t *off, char **buf, int *resid, int bshift)
if (cc > ((nbuf - nb) << bshift))
cc = (nbuf - nb) << bshift;
if (! test_bit (BH_Bounce, &bh->b_state))
- bh->b_data = (char *) pmap_extract (vm_map_pmap (device_io_map),
+ bh->b_data = (char *) phystokv(pmap_extract (vm_map_pmap (device_io_map),
(((vm_offset_t) *buf)
- + (nb << bshift)));
+ + (nb << bshift))));
else
{
bh->b_data = alloc_buffer (cc);
if (! bh->b_data)
{
- err = -LINUX_ENOMEM;
+ err = -ENOMEM;
break;
}
if (rw == WRITE)
@@ -642,7 +632,7 @@ rdwr_full (int rw, kdev_t dev, loff_t *off, char **buf, int *resid, int bshift)
&& rw == READ && test_bit (BH_Bounce, &bh->b_state))
memcpy (*buf + cc, bh->b_data, bh->b_size);
else if (! err && ! buffer_uptodate (bh))
- err = -LINUX_EIO;
+ err = -EIO;
if (test_bit (BH_Bounce, &bh->b_state))
free_buffer (bh->b_data, bh->b_size);
}
@@ -789,6 +779,7 @@ static struct block_data *open_list;
extern struct device_emulation_ops linux_block_emulation_ops;
static io_return_t device_close (void *);
+static io_return_t device_close_forced (void *, int);
/* Return a send right for block device BD. */
static ipc_port_t
@@ -922,7 +913,7 @@ static kern_return_t
init_partition (struct name_map *np, kdev_t *dev,
struct device_struct *ds, int slice, int *part)
{
- int err, i, j;
+ int i, j;
struct disklabel *lp;
struct gendisk *gd = ds->gd;
struct partition *p;
@@ -947,7 +938,7 @@ init_partition (struct name_map *np, kdev_t *dev,
if (gd->part[MINOR (d->inode.i_rdev)].nr_sects <= 0
|| gd->part[MINOR (d->inode.i_rdev)].start_sect < 0)
continue;
- linux_intr_pri = SPL5;
+ linux_intr_pri = SPL6;
d->file.f_flags = 0;
d->file.f_mode = O_RDONLY;
if (ds->fops->open && (*ds->fops->open) (&d->inode, &d->file))
@@ -1093,7 +1084,7 @@ device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
if (ds->fops->open)
{
td.inode.i_rdev = dev;
- linux_intr_pri = SPL5;
+ linux_intr_pri = SPL6;
err = (*ds->fops->open) (&td.inode, &td.file);
if (err)
{
@@ -1164,6 +1155,7 @@ out:
{
ipc_kobject_set (bd->port, IKO_NULL, IKOT_NONE);
ipc_port_dealloc_kernel (bd->port);
+ *devp = IP_NULL;
}
kfree ((vm_offset_t) bd, sizeof (struct block_data));
bd = NULL;
@@ -1174,18 +1166,16 @@ out:
bd->open_count = 1;
bd->next = open_list;
open_list = bd;
+ *devp = &bd -> device;
}
- if (IP_VALID (reply_port))
- ds_device_open_reply (reply_port, reply_port_type, err, dev_to_port (bd));
- else if (! err)
+ if (!IP_VALID (reply_port) && ! err)
device_close (bd);
-
- return MIG_NO_REPLY;
+ return err;
}
static io_return_t
-device_close (void *d)
+device_close_forced (void *d, int force)
{
struct block_data *bd = d, *bdp, **prev;
struct device_struct *ds = bd->ds;
@@ -1202,7 +1192,7 @@ device_close (void *d)
}
ds->busy = 1;
- if (--bd->open_count == 0)
+ if (force || --bd->open_count == 0)
{
/* Wait for pending I/O to complete. */
while (bd->iocount > 0)
@@ -1245,6 +1235,13 @@ device_close (void *d)
return D_SUCCESS;
}
+static io_return_t
+device_close (void *d)
+{
+ return device_close_forced (d, 0);
+}
+
+
#define MAX_COPY (VM_MAP_COPY_PAGE_LIST_MAX << PAGE_SHIFT)
/* Check block BN and size COUNT for I/O validity
@@ -1674,7 +1671,7 @@ device_get_status (void *d, dev_flavor_t flavor, dev_status_t status,
INIT_DATA();
if ((*bd->ds->fops->ioctl) (&td.inode, &td.file,
- HDIO_GETGEO, &hg))
+ HDIO_GETGEO, (unsigned long)&hg))
return D_INVALID_OPERATION;
dp->dp_type = DPT_WINI; /* XXX: It may be a floppy... */
@@ -1723,6 +1720,17 @@ device_set_status (void *d, dev_flavor_t flavor, dev_status_t status,
return D_INVALID_OPERATION;
}
+
+static void
+device_no_senders (mach_no_senders_notification_t *ns)
+{
+ device_t dev;
+
+ dev = dev_port_lookup((ipc_port_t) ns->not_header.msgh_remote_port);
+ assert(dev);
+ device_close_forced (dev->emul_data, 1);
+}
+
struct device_emulation_ops linux_block_emulation_ops =
{
NULL,
@@ -1738,7 +1746,7 @@ struct device_emulation_ops linux_block_emulation_ops =
device_get_status,
NULL,
NULL,
- NULL,
+ device_no_senders,
NULL,
NULL
};
diff --git a/linux/dev/glue/glue.h b/linux/dev/glue/glue.h
new file mode 100644
index 00000000..5d4f6d88
--- /dev/null
+++ b/linux/dev/glue/glue.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef LINUX_DEV_GLUE_GLUE_H
+#define LINUX_DEV_GLUE_GLUE_H
+
+#include <vm/vm_types.h>
+#include <mach/machine/vm_types.h>
+
+extern int linux_auto_config;
+extern int linux_intr_pri;
+
+extern void *alloc_contig_mem (unsigned, unsigned, unsigned, vm_page_t *);
+extern void free_contig_mem (vm_page_t);
+extern void init_IRQ (void);
+extern void restore_IRQ (void);
+extern void linux_kmem_init (void);
+extern void linux_net_emulation_init (void);
+extern void device_setup (void);
+extern void linux_timer_intr (void);
+extern void linux_bad_intr (int);
+extern void linux_sched_init (void);
+extern void pcmcia_init (void);
+extern void linux_soft_intr (void);
+extern int issig (void);
+extern int linux_to_mach_error (int);
+extern char *get_options(char *str, int *ints);
+
+#endif /* LINUX_DEV_GLUE_GLUE_H */
diff --git a/linux/dev/glue/kmem.c b/linux/dev/glue/kmem.c
index 8c21ce7d..28321711 100644
--- a/linux/dev/glue/kmem.c
+++ b/linux/dev/glue/kmem.c
@@ -29,6 +29,7 @@
#include <kern/assert.h>
#include <kern/kalloc.h>
+#include <kern/printf.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
@@ -40,14 +41,13 @@
#include <asm/system.h>
-extern void *alloc_contig_mem (unsigned, unsigned, unsigned, vm_page_t *);
-extern int printf (const char *, ...);
+#include <linux/dev/glue/glue.h>
/* Amount of memory to reserve for Linux memory allocator.
We reserve 64K chunks to stay within DMA limits.
Increase MEM_CHUNKS if the kernel is running out of memory. */
#define MEM_CHUNK_SIZE (64 * 1024)
-#define MEM_CHUNKS 7
+#define MEM_CHUNKS 32
#define MEM_DMA_LIMIT (16 * 1024 * 1024)
/* Mininum amount that linux_kmalloc will allocate. */
@@ -218,7 +218,7 @@ void *
linux_kmalloc (unsigned int size, int priority)
{
int order, coalesced = 0;
- unsigned flags;
+ unsigned long flags;
struct pagehdr *ph;
struct blkhdr *bh, *new_bh;
@@ -310,7 +310,7 @@ again:
void
linux_kfree (void *p)
{
- unsigned flags;
+ unsigned long flags;
struct blkhdr *bh;
struct pagehdr *ph;
@@ -385,7 +385,8 @@ unsigned long
__get_free_pages (int priority, unsigned long order, int dma)
{
int i, pages_collected = 0;
- unsigned flags, bits, off, j, len;
+ unsigned bits, off, j, len;
+ unsigned long flags;
assert ((PAGE_SIZE << order) <= MEM_CHUNK_SIZE);
@@ -444,7 +445,8 @@ void
free_pages (unsigned long addr, unsigned long order)
{
int i;
- unsigned flags, bits, len, j;
+ unsigned bits, len, j;
+ unsigned long flags;
assert ((addr & PAGE_MASK) == 0);
@@ -554,7 +556,7 @@ vfree (void *addr)
if (!p)
panic ("vmalloc_list_lookup failure");
- kmem_free (kernel_map, addr, p->size);
+ kmem_free (kernel_map, (vm_offset_t) addr, p->size);
vmalloc_list_remove (p);
}
diff --git a/linux/dev/glue/misc.c b/linux/dev/glue/misc.c
index d8ca3c24..77dc31dd 100644
--- a/linux/dev/glue/misc.c
+++ b/linux/dev/glue/misc.c
@@ -53,6 +53,7 @@
#include <sys/types.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
+#include <kern/printf.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <device/device_types.h>
@@ -66,10 +67,7 @@
#include <linux/blk.h>
#include <linux/proc_fs.h>
#include <linux/kernel_stat.h>
-
-extern boolean_t vm_map_lookup_entry (register vm_map_t, register vm_offset_t,
- vm_map_entry_t *);
-extern int printf (const char *, ...);
+#include <linux/dev/glue/glue.h>
int (*dispatch_scsi_info_ptr) (int ino, char *buffer, char **start,
off_t offset, int length, int inout) = 0;
@@ -84,34 +82,34 @@ linux_to_mach_error (int err)
case 0:
return D_SUCCESS;
- case -LINUX_EPERM:
+ case -EPERM:
return D_INVALID_OPERATION;
- case -LINUX_EIO:
+ case -EIO:
return D_IO_ERROR;
- case -LINUX_ENXIO:
+ case -ENXIO:
return D_NO_SUCH_DEVICE;
- case -LINUX_EACCES:
+ case -EACCES:
return D_INVALID_OPERATION;
- case -LINUX_EFAULT:
+ case -EFAULT:
return D_INVALID_SIZE;
- case -LINUX_EBUSY:
+ case -EBUSY:
return D_ALREADY_OPEN;
- case -LINUX_EINVAL:
+ case -EINVAL:
return D_INVALID_SIZE;
- case -LINUX_EROFS:
+ case -EROFS:
return D_READ_ONLY;
- case -LINUX_EWOULDBLOCK:
+ case -EWOULDBLOCK:
return D_WOULD_BLOCK;
- case -LINUX_ENOMEM:
+ case -ENOMEM:
return D_NO_MEMORY;
default:
@@ -123,6 +121,8 @@ linux_to_mach_error (int err)
int
issig ()
{
+ if (!current_thread())
+ return 0;
return current_thread ()->wait_result != THREAD_AWAKENED;
}
@@ -148,7 +148,7 @@ verify_area (int rw, const void *p, unsigned long size)
|| (entry->protection & prot) != prot)
{
vm_map_unlock_read (current_map ());
- return -LINUX_EFAULT;
+ return -EFAULT;
}
if (entry->vme_end - entry->vme_start >= len)
break;
@@ -229,7 +229,12 @@ add_blkdev_randomness (int major)
void
do_gettimeofday (struct timeval *tv)
{
- host_get_time (1, tv);
+ /*
+ * XXX: The first argument should be mach_host_self (), but that's too
+ * expensive, and the host argument is not used by host_get_time (),
+ * only checked not to be HOST_NULL.
+ */
+ host_get_time ((host_t) 1, (time_value_t *) tv);
}
int
diff --git a/linux/dev/glue/net.c b/linux/dev/glue/net.c
index 095428d3..15732737 100644
--- a/linux/dev/glue/net.c
+++ b/linux/dev/glue/net.c
@@ -61,6 +61,7 @@
#include <sys/types.h>
#include <machine/spl.h>
+#include <machine/vm_param.h>
#include <mach/mach_types.h>
#include <mach/kern_return.h>
@@ -69,6 +70,9 @@
#include <mach/vm_param.h>
#include <mach/notify.h>
+#include <kern/kalloc.h>
+#include <kern/printf.h>
+
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
@@ -84,6 +88,7 @@
#include <device/net_io.h>
#include <device/device_reply.user.h>
#include <device/device_emul.h>
+#include <device/ds_routines.h>
#define MACH_INCLUDE
#include <linux/kernel.h>
@@ -97,7 +102,7 @@
#include <linux/etherdevice.h>
#include <linux/wireless.h>
-extern int linux_intr_pri;
+#include <linux/dev/glue/glue.h>
/* One of these is associated with each instance of a device. */
struct net_data
@@ -248,7 +253,6 @@ void
dev_kfree_skb (struct sk_buff *skb, int mode)
{
unsigned flags;
- extern void *io_done_list;
/* Queue sk_buff on done list if there is a
page list attached or we need to send a reply.
@@ -425,7 +429,6 @@ device_write (void *d, ipc_port_t reply_port,
{
unsigned char *p;
int i, amt, skblen, s;
- io_return_t err = 0;
vm_map_copy_t copy = (vm_map_copy_t) data;
struct net_data *nd = d;
struct linux_device *dev = nd->dev;
@@ -447,7 +450,7 @@ device_write (void *d, ipc_port_t reply_port,
assert (copy->cpy_npages == 1);
skb->copy = copy;
- skb->data = ((void *) copy->cpy_page_list[0]->phys_addr
+ skb->data = ((void *) phystokv(copy->cpy_page_list[0]->phys_addr)
+ (copy->offset & PAGE_MASK));
skb->len = count;
skb->head = skb->data;
@@ -461,7 +464,7 @@ device_write (void *d, ipc_port_t reply_port,
skb->end = skb->tail;
memcpy (skb->data,
- ((void *) copy->cpy_page_list[0]->phys_addr
+ ((void *) phystokv(copy->cpy_page_list[0]->phys_addr)
+ (copy->offset & PAGE_MASK)),
amt);
count -= amt;
@@ -471,7 +474,7 @@ device_write (void *d, ipc_port_t reply_port,
amt = PAGE_SIZE;
if (amt > count)
amt = count;
- memcpy (p, (void *) copy->cpy_page_list[i]->phys_addr, amt);
+ memcpy (p, (void *) phystokv(copy->cpy_page_list[i]->phys_addr), amt);
count -= amt;
p += amt;
}
diff --git a/linux/dev/include/asm-i386/errno.h b/linux/dev/include/asm-i386/errno.h
deleted file mode 100644
index 1683367a..00000000
--- a/linux/dev/include/asm-i386/errno.h
+++ /dev/null
@@ -1,266 +0,0 @@
-#ifndef _I386_ERRNO_H
-#define _I386_ERRNO_H
-
-#ifdef MACH_INCLUDE
-
-#define LINUX_EPERM 1 /* Operation not permitted */
-#define LINUX_ENOENT 2 /* No such file or directory */
-#define LINUX_ESRCH 3 /* No such process */
-#define LINUX_EINTR 4 /* Interrupted system call */
-#define LINUX_EIO 5 /* I/O error */
-#define LINUX_ENXIO 6 /* No such device or address */
-#define LINUX_E2BIG 7 /* Arg list too long */
-#define LINUX_ENOEXEC 8 /* Exec format error */
-#define LINUX_EBADF 9 /* Bad file number */
-#define LINUX_ECHILD 10 /* No child processes */
-#define LINUX_EAGAIN 11 /* Try again */
-#define LINUX_ENOMEM 12 /* Out of memory */
-#define LINUX_EACCES 13 /* Permission denied */
-#define LINUX_EFAULT 14 /* Bad address */
-#define LINUX_ENOTBLK 15 /* Block device required */
-#define LINUX_EBUSY 16 /* Device or resource busy */
-#define LINUX_EEXIST 17 /* File exists */
-#define LINUX_EXDEV 18 /* Cross-device link */
-#define LINUX_ENODEV 19 /* No such device */
-#define LINUX_ENOTDIR 20 /* Not a directory */
-#define LINUX_EISDIR 21 /* Is a directory */
-#define LINUX_EINVAL 22 /* Invalid argument */
-#define LINUX_ENFILE 23 /* File table overflow */
-#define LINUX_EMFILE 24 /* Too many open files */
-#define LINUX_ENOTTY 25 /* Not a typewriter */
-#define LINUX_ETXTBSY 26 /* Text file busy */
-#define LINUX_EFBIG 27 /* File too large */
-#define LINUX_ENOSPC 28 /* No space left on device */
-#define LINUX_ESPIPE 29 /* Illegal seek */
-#define LINUX_EROFS 30 /* Read-only file system */
-#define LINUX_EMLINK 31 /* Too many links */
-#define LINUX_EPIPE 32 /* Broken pipe */
-#define LINUX_EDOM 33 /* Math argument out of domain of func */
-#define LINUX_ERANGE 34 /* Math result not representable */
-#define LINUX_EDEADLK 35 /* Resource deadlock would occur */
-#define LINUX_ENAMETOOLONG 36 /* File name too long */
-#define LINUX_ENOLCK 37 /* No record locks available */
-#define LINUX_ENOSYS 38 /* Function not implemented */
-#define LINUX_ENOTEMPTY 39 /* Directory not empty */
-#define LINUX_ELOOP 40 /* Too many symbolic links encountered */
-#define LINUX_EWOULDBLOCK LINUX_EAGAIN /* Operation would block */
-#define LINUX_ENOMSG 42 /* No message of desired type */
-#define LINUX_EIDRM 43 /* Identifier removed */
-#define LINUX_ECHRNG 44 /* Channel number out of range */
-#define LINUX_EL2NSYNC 45 /* Level 2 not synchronized */
-#define LINUX_EL3HLT 46 /* Level 3 halted */
-#define LINUX_EL3RST 47 /* Level 3 reset */
-#define LINUX_ELNRNG 48 /* Link number out of range */
-#define LINUX_EUNATCH 49 /* Protocol driver not attached */
-#define LINUX_ENOCSI 50 /* No CSI structure available */
-#define LINUX_EL2HLT 51 /* Level 2 halted */
-#define LINUX_EBADE 52 /* Invalid exchange */
-#define LINUX_EBADR 53 /* Invalid request descriptor */
-#define LINUX_EXFULL 54 /* Exchange full */
-#define LINUX_ENOANO 55 /* No anode */
-#define LINUX_EBADRQC 56 /* Invalid request code */
-#define LINUX_EBADSLT 57 /* Invalid slot */
-
-#define LINUX_EDEADLOCK LINUX_EDEADLK
-
-#define LINUX_EBFONT 59 /* Bad font file format */
-#define LINUX_ENOSTR 60 /* Device not a stream */
-#define LINUX_ENODATA 61 /* No data available */
-#define LINUX_ETIME 62 /* Timer expired */
-#define LINUX_ENOSR 63 /* Out of streams resources */
-#define LINUX_ENONET 64 /* Machine is not on the network */
-#define LINUX_ENOPKG 65 /* Package not installed */
-#define LINUX_EREMOTE 66 /* Object is remote */
-#define LINUX_ENOLINK 67 /* Link has been severed */
-#define LINUX_EADV 68 /* Advertise error */
-#define LINUX_ESRMNT 69 /* Srmount error */
-#define LINUX_ECOMM 70 /* Communication error on send */
-#define LINUX_EPROTO 71 /* Protocol error */
-#define LINUX_EMULTIHOP 72 /* Multihop attempted */
-#define LINUX_EDOTDOT 73 /* RFS specific error */
-#define LINUX_EBADMSG 74 /* Not a data message */
-#define LINUX_EOVERFLOW 75 /* Value too large for defined data type */
-#define LINUX_ENOTUNIQ 76 /* Name not unique on network */
-#define LINUX_EBADFD 77 /* File descriptor in bad state */
-#define LINUX_EREMCHG 78 /* Remote address changed */
-#define LINUX_ELIBACC 79 /* Can not access a needed shared library */
-#define LINUX_ELIBBAD 80 /* Accessing a corrupted shared library */
-#define LINUX_ELIBSCN 81 /* .lib section in a.out corrupted */
-#define LINUX_ELIBMAX 82 /* Attempting to link in too many shared libraries */
-#define LINUX_ELIBEXEC 83 /* Cannot exec a shared library directly */
-#define LINUX_EILSEQ 84 /* Illegal byte sequence */
-#define LINUX_ERESTART 85 /* Interrupted system call should be restarted */
-#define LINUX_ESTRPIPE 86 /* Streams pipe error */
-#define LINUX_EUSERS 87 /* Too many users */
-#define LINUX_ENOTSOCK 88 /* Socket operation on non-socket */
-#define LINUX_EDESTADDRREQ 89 /* Destination address required */
-#define LINUX_EMSGSIZE 90 /* Message too long */
-#define LINUX_EPROTOTYPE 91 /* Protocol wrong type for socket */
-#define LINUX_ENOPROTOOPT 92 /* Protocol not available */
-#define LINUX_EPROTONOSUPPORT 93 /* Protocol not supported */
-#define LINUX_ESOCKTNOSUPPORT 94 /* Socket type not supported */
-#define LINUX_EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
-#define LINUX_EPFNOSUPPORT 96 /* Protocol family not supported */
-#define LINUX_EAFNOSUPPORT 97 /* Address family not supported by protocol */
-#define LINUX_EADDRINUSE 98 /* Address already in use */
-#define LINUX_EADDRNOTAVAIL 99 /* Cannot assign requested address */
-#define LINUX_ENETDOWN 100 /* Network is down */
-#define LINUX_ENETUNREACH 101 /* Network is unreachable */
-#define LINUX_ENETRESET 102 /* Network dropped connection because of reset */
-#define LINUX_ECONNABORTED 103 /* Software caused connection abort */
-#define LINUX_ECONNRESET 104 /* Connection reset by peer */
-#define LINUX_ENOBUFS 105 /* No buffer space available */
-#define LINUX_EISCONN 106 /* Transport endpoint is already connected */
-#define LINUX_ENOTCONN 107 /* Transport endpoint is not connected */
-#define LINUX_ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
-#define LINUX_ETOOMANYREFS 109 /* Too many references: cannot splice */
-#define LINUX_ETIMEDOUT 110 /* Connection timed out */
-#define LINUX_ECONNREFUSED 111 /* Connection refused */
-#define LINUX_EHOSTDOWN 112 /* Host is down */
-#define LINUX_EHOSTUNREACH 113 /* No route to host */
-#define LINUX_EALREADY 114 /* Operation already in progress */
-#define LINUX_EINPROGRESS 115 /* Operation now in progress */
-#define LINUX_ESTALE 116 /* Stale NFS file handle */
-#define LINUX_EUCLEAN 117 /* Structure needs cleaning */
-#define LINUX_ENOTNAM 118 /* Not a XENIX named type file */
-#define LINUX_ENAVAIL 119 /* No XENIX semaphores available */
-#define LINUX_EISNAM 120 /* Is a named type file */
-#define LINUX_EREMOTEIO 121 /* Remote I/O error */
-#define LINUX_EDQUOT 122 /* Quota exceeded */
-
-#define LINUX_ENOMEDIUM 123 /* No medium found */
-#define LINUX_EMEDIUMTYPE 124 /* Wrong medium type */
-
-#else /* !MACH_INCLUDE */
-
-#define EPERM 1 /* Operation not permitted */
-#define ENOENT 2 /* No such file or directory */
-#define ESRCH 3 /* No such process */
-#define EINTR 4 /* Interrupted system call */
-#define EIO 5 /* I/O error */
-#define ENXIO 6 /* No such device or address */
-#define E2BIG 7 /* Arg list too long */
-#define ENOEXEC 8 /* Exec format error */
-#define EBADF 9 /* Bad file number */
-#define ECHILD 10 /* No child processes */
-#define EAGAIN 11 /* Try again */
-#define ENOMEM 12 /* Out of memory */
-#define EACCES 13 /* Permission denied */
-#define EFAULT 14 /* Bad address */
-#define ENOTBLK 15 /* Block device required */
-#define EBUSY 16 /* Device or resource busy */
-#define EEXIST 17 /* File exists */
-#define EXDEV 18 /* Cross-device link */
-#define ENODEV 19 /* No such device */
-#define ENOTDIR 20 /* Not a directory */
-#define EISDIR 21 /* Is a directory */
-#define EINVAL 22 /* Invalid argument */
-#define ENFILE 23 /* File table overflow */
-#define EMFILE 24 /* Too many open files */
-#define ENOTTY 25 /* Not a typewriter */
-#define ETXTBSY 26 /* Text file busy */
-#define EFBIG 27 /* File too large */
-#define ENOSPC 28 /* No space left on device */
-#define ESPIPE 29 /* Illegal seek */
-#define EROFS 30 /* Read-only file system */
-#define EMLINK 31 /* Too many links */
-#define EPIPE 32 /* Broken pipe */
-#define EDOM 33 /* Math argument out of domain of func */
-#define ERANGE 34 /* Math result not representable */
-#define EDEADLK 35 /* Resource deadlock would occur */
-#define ENAMETOOLONG 36 /* File name too long */
-#define ENOLCK 37 /* No record locks available */
-#define ENOSYS 38 /* Function not implemented */
-#define ENOTEMPTY 39 /* Directory not empty */
-#define ELOOP 40 /* Too many symbolic links encountered */
-#define EWOULDBLOCK EAGAIN /* Operation would block */
-#define ENOMSG 42 /* No message of desired type */
-#define EIDRM 43 /* Identifier removed */
-#define ECHRNG 44 /* Channel number out of range */
-#define EL2NSYNC 45 /* Level 2 not synchronized */
-#define EL3HLT 46 /* Level 3 halted */
-#define EL3RST 47 /* Level 3 reset */
-#define ELNRNG 48 /* Link number out of range */
-#define EUNATCH 49 /* Protocol driver not attached */
-#define ENOCSI 50 /* No CSI structure available */
-#define EL2HLT 51 /* Level 2 halted */
-#define EBADE 52 /* Invalid exchange */
-#define EBADR 53 /* Invalid request descriptor */
-#define EXFULL 54 /* Exchange full */
-#define ENOANO 55 /* No anode */
-#define EBADRQC 56 /* Invalid request code */
-#define EBADSLT 57 /* Invalid slot */
-
-#define EDEADLOCK EDEADLK
-
-#define EBFONT 59 /* Bad font file format */
-#define ENOSTR 60 /* Device not a stream */
-#define ENODATA 61 /* No data available */
-#define ETIME 62 /* Timer expired */
-#define ENOSR 63 /* Out of streams resources */
-#define ENONET 64 /* Machine is not on the network */
-#define ENOPKG 65 /* Package not installed */
-#define EREMOTE 66 /* Object is remote */
-#define ENOLINK 67 /* Link has been severed */
-#define EADV 68 /* Advertise error */
-#define ESRMNT 69 /* Srmount error */
-#define ECOMM 70 /* Communication error on send */
-#define EPROTO 71 /* Protocol error */
-#define EMULTIHOP 72 /* Multihop attempted */
-#define EDOTDOT 73 /* RFS specific error */
-#define EBADMSG 74 /* Not a data message */
-#define EOVERFLOW 75 /* Value too large for defined data type */
-#define ENOTUNIQ 76 /* Name not unique on network */
-#define EBADFD 77 /* File descriptor in bad state */
-#define EREMCHG 78 /* Remote address changed */
-#define ELIBACC 79 /* Can not access a needed shared library */
-#define ELIBBAD 80 /* Accessing a corrupted shared library */
-#define ELIBSCN 81 /* .lib section in a.out corrupted */
-#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
-#define ELIBEXEC 83 /* Cannot exec a shared library directly */
-#define EILSEQ 84 /* Illegal byte sequence */
-#define ERESTART 85 /* Interrupted system call should be restarted */
-#define ESTRPIPE 86 /* Streams pipe error */
-#define EUSERS 87 /* Too many users */
-#define ENOTSOCK 88 /* Socket operation on non-socket */
-#define EDESTADDRREQ 89 /* Destination address required */
-#define EMSGSIZE 90 /* Message too long */
-#define EPROTOTYPE 91 /* Protocol wrong type for socket */
-#define ENOPROTOOPT 92 /* Protocol not available */
-#define EPROTONOSUPPORT 93 /* Protocol not supported */
-#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
-#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
-#define EPFNOSUPPORT 96 /* Protocol family not supported */
-#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
-#define EADDRINUSE 98 /* Address already in use */
-#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
-#define ENETDOWN 100 /* Network is down */
-#define ENETUNREACH 101 /* Network is unreachable */
-#define ENETRESET 102 /* Network dropped connection because of reset */
-#define ECONNABORTED 103 /* Software caused connection abort */
-#define ECONNRESET 104 /* Connection reset by peer */
-#define ENOBUFS 105 /* No buffer space available */
-#define EISCONN 106 /* Transport endpoint is already connected */
-#define ENOTCONN 107 /* Transport endpoint is not connected */
-#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
-#define ETOOMANYREFS 109 /* Too many references: cannot splice */
-#define ETIMEDOUT 110 /* Connection timed out */
-#define ECONNREFUSED 111 /* Connection refused */
-#define EHOSTDOWN 112 /* Host is down */
-#define EHOSTUNREACH 113 /* No route to host */
-#define EALREADY 114 /* Operation already in progress */
-#define EINPROGRESS 115 /* Operation now in progress */
-#define ESTALE 116 /* Stale NFS file handle */
-#define EUCLEAN 117 /* Structure needs cleaning */
-#define ENOTNAM 118 /* Not a XENIX named type file */
-#define ENAVAIL 119 /* No XENIX semaphores available */
-#define EISNAM 120 /* Is a named type file */
-#define EREMOTEIO 121 /* Remote I/O error */
-#define EDQUOT 122 /* Quota exceeded */
-
-#define ENOMEDIUM 123 /* No medium found */
-#define EMEDIUMTYPE 124 /* Wrong medium type */
-
-#endif /* !MACH_INCLUDE */
-
-#endif
diff --git a/linux/dev/include/asm-i386/page.h b/linux/dev/include/asm-i386/page.h
index 2bb6837e..be818481 100644
--- a/linux/dev/include/asm-i386/page.h
+++ b/linux/dev/include/asm-i386/page.h
@@ -1,12 +1,7 @@
#ifndef _I386_PAGE_H
#define _I386_PAGE_H
-#ifndef MACH_INCLUDE
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-#endif
+#include <mach/vm_param.h>
#ifdef __KERNEL__
diff --git a/linux/dev/include/asm-i386/system.h b/linux/dev/include/asm-i386/system.h
index 3e17d2d1..f26a33e7 100644
--- a/linux/dev/include/asm-i386/system.h
+++ b/linux/dev/include/asm-i386/system.h
@@ -224,9 +224,9 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
#define __sti() __asm__ __volatile__ ("sti": : :"memory")
#define __cli() __asm__ __volatile__ ("cli": : :"memory")
#define __save_flags(x) \
-__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
+__asm__ __volatile__("pushf ; pop %0" : "=r" (x): /* no input */ :"memory")
#define __restore_flags(x) \
-__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
+__asm__ __volatile__("push %0 ; popf": /* no output */ :"g" (x):"memory")
#ifdef __SMP__
diff --git a/linux/dev/include/linux/blk.h b/linux/dev/include/linux/blk.h
index 852853c5..412b8641 100644
--- a/linux/dev/include/linux/blk.h
+++ b/linux/dev/include/linux/blk.h
@@ -385,7 +385,9 @@ static void end_request(int uptodate) {
struct request *req = CURRENT;
#endif /* IDE_DRIVER */
struct buffer_head * bh;
+#ifndef MACH
int nsect;
+#endif
req->errors = 0;
if (!uptodate) {
diff --git a/linux/dev/include/linux/kernel.h b/linux/dev/include/linux/kernel.h
index aef73ac3..4db76acc 100644
--- a/linux/dev/include/linux/kernel.h
+++ b/linux/dev/include/linux/kernel.h
@@ -9,9 +9,7 @@
#include <stdarg.h>
#include <linux/linkage.h>
-
-/* Optimization barrier */
-#define barrier() __asm__("": : :"memory")
+#include <linux/compiler.h>
#define INT_MAX ((int)(~0U>>1))
#define UINT_MAX (~0U)
diff --git a/linux/dev/include/linux/notifier.h b/linux/dev/include/linux/notifier.h
index eede20f0..b3c9ccf6 100644
--- a/linux/dev/include/linux/notifier.h
+++ b/linux/dev/include/linux/notifier.h
@@ -55,11 +55,7 @@ extern __inline__ int notifier_chain_unregister(struct notifier_block **nl, stru
}
nl=&((*nl)->next);
}
-#ifdef MACH_INCLUDE
- return -LINUX_ENOENT;
-#else
return -ENOENT;
-#endif
}
/*
diff --git a/linux/dev/init/main.c b/linux/dev/init/main.c
index 9ed35d3d..ecbd0b68 100644
--- a/linux/dev/init/main.c
+++ b/linux/dev/init/main.c
@@ -39,6 +39,7 @@
#include <machine/spl.h>
#include <machine/pmap.h>
#include <machine/vm_param.h>
+#include <machine/model_dep.h>
#define MACH_INCLUDE
#include <linux/sched.h>
@@ -47,6 +48,8 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/dev/glue/glue.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -83,30 +86,6 @@ struct drive_info_struct
*/
static void calibrate_delay (void);
-extern vm_offset_t phys_last_addr;
-
-extern void *alloc_contig_mem (unsigned, unsigned, unsigned, vm_page_t *);
-extern void free_contig_mem (vm_page_t);
-extern void init_IRQ (void);
-extern void restore_IRQ (void);
-extern void startrtclock (void);
-extern void linux_version_init (void);
-extern void linux_kmem_init (void);
-extern unsigned long pci_init (unsigned long, unsigned long);
-extern void linux_net_emulation_init (void);
-extern void device_setup (void);
-extern void linux_printk (char *,...);
-extern int linux_timer_intr (void);
-extern spl_t spl0 (void);
-extern spl_t splhigh (void);
-extern void form_pic_mask (void);
-extern int linux_bad_intr (int);
-extern int prtnull ();
-extern int intnull ();
-extern void linux_sched_init (void);
-extern void pcmcia_init (void);
-
-
/*
* Amount of contiguous memory to allocate for initialization.
*/
@@ -170,7 +149,7 @@ linux_init (void)
/*
* Free unused memory.
*/
- while (pages && pages->phys_addr < round_page (memory_start))
+ while (pages && phystokv(pages->phys_addr) < round_page (memory_start))
pages = (vm_page_t) pages->pageq.next;
if (pages)
free_contig_mem (pages);
@@ -317,7 +296,7 @@ alloc_contig_mem (unsigned size, unsigned limit,
kfree ((vm_offset_t) bits, bits_len);
if (pages)
*pages = page_list;
- return (m);
+ return phystokv(m);
}
/*
diff --git a/linux/dev/kernel/dma.c b/linux/dev/kernel/dma.c
index 4b569780..bbda4bbf 100644
--- a/linux/dev/kernel/dma.c
+++ b/linux/dev/kernel/dma.c
@@ -80,10 +80,10 @@ int
request_dma (unsigned int dmanr, const char *device_id)
{
if (dmanr >= MAX_DMA_CHANNELS)
- return -LINUX_EINVAL;
+ return -EINVAL;
if (xchg (&dma_chan_busy[dmanr].lock, 1) != 0)
- return -LINUX_EBUSY;
+ return -EBUSY;
dma_chan_busy[dmanr].device_id = device_id;
diff --git a/linux/dev/kernel/printk.c b/linux/dev/kernel/printk.c
index 1c45b245..7c65d303 100644
--- a/linux/dev/kernel/printk.c
+++ b/linux/dev/kernel/printk.c
@@ -27,6 +27,7 @@
#include <stdarg.h>
#include <asm/system.h>
#include <kern/assert.h>
+#include <kern/printf.h>
#include <device/cons.h>
static char buf[2048];
@@ -40,7 +41,8 @@ int
printk (char *fmt, ...)
{
va_list args;
- int n, flags;
+ int n;
+ unsigned long flags;
char *p, *msg, *buf_end;
static int msg_level = -1;
diff --git a/linux/dev/kernel/resource.c b/linux/dev/kernel/resource.c
index 7a187552..ba107e8e 100644
--- a/linux/dev/kernel/resource.c
+++ b/linux/dev/kernel/resource.c
@@ -131,7 +131,7 @@ release_region (unsigned int from, unsigned int num)
int
check_region (unsigned int from, unsigned int num)
{
- return (find_gap (&iolist, from, num) == NULL) ? -LINUX_EBUSY : 0;
+ return (find_gap (&iolist, from, num) == NULL) ? -EBUSY : 0;
}
/* Called from init/main.c to reserve IO ports. */
diff --git a/linux/dev/kernel/sched.c b/linux/dev/kernel/sched.c
index 792c2dae..87906a45 100644
--- a/linux/dev/kernel/sched.c
+++ b/linux/dev/kernel/sched.c
@@ -34,6 +34,9 @@
#include <kern/thread.h>
#include <kern/sched_prim.h>
+#include <kern/printf.h>
+
+#include <machine/machspl.h>
#define MACH_INCLUDE
#include <linux/sched.h>
@@ -41,21 +44,13 @@
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
+#include <linux/dev/glue/glue.h>
#include <asm/system.h>
#include <asm/atomic.h>
int securelevel = 0;
-extern void *alloc_contig_mem (unsigned, unsigned, unsigned, vm_page_t *);
-extern void free_contig_mem (vm_page_t);
-extern spl_t splhigh (void);
-extern spl_t splx (spl_t);
-extern void linux_soft_intr (void);
-extern int issig (void);
-extern int printf (const char *, ...);
-extern int linux_auto_config;
-
static void timer_bh (void);
DECLARE_TASK_QUEUE (tq_timer);
@@ -165,7 +160,7 @@ __do_down (struct semaphore *sem, int task_state)
if (task_state == TASK_INTERRUPTIBLE && issig ())
{
- ret = -LINUX_EINTR;
+ ret = -EINTR;
atomic_inc (&sem->count);
break;
}
@@ -185,7 +180,7 @@ __do_down (struct semaphore *sem, int task_state)
{
if (task_state == TASK_INTERRUPTIBLE && issig ())
{
- ret = -LINUX_EINTR;
+ ret = -EINTR;
atomic_inc (&sem->count);
break;
}
@@ -326,16 +321,6 @@ schedule (void)
}
void
-cdrom_sleep (int t)
-{
- int xxx;
-
- assert_wait ((event_t) &xxx, TRUE);
- thread_set_timeout (t);
- schedule ();
-}
-
-void
linux_sched_init (void)
{
/*
diff --git a/linux/dev/kernel/softirq.c b/linux/dev/kernel/softirq.c
index 96102a7a..ac95a7dd 100644
--- a/linux/dev/kernel/softirq.c
+++ b/linux/dev/kernel/softirq.c
@@ -13,16 +13,18 @@
#include <linux/interrupt.h>
#include <asm/system.h>
+#include <linux/dev/glue/glue.h>
+
int bh_mask_count[32];
-unsigned long bh_active = 0;
-unsigned long bh_mask = 0;
+unsigned int bh_active = 0;
+unsigned int bh_mask = 0;
void (*bh_base[32]) (void);
void
linux_soft_intr (void)
{
- unsigned long active;
- unsigned long mask, left;
+ unsigned int active;
+ unsigned int mask, left;
void (**bh) (void);
sti ();
@@ -42,5 +44,5 @@ linux_soft_intr (void)
}
return;
bad_bh:
- printk ("linux_soft_intr:bad interrupt handler entry %08lx\n", mask);
+ printk ("linux_soft_intr:bad interrupt handler entry %08x\n", mask);
}
diff --git a/linux/dev/lib/vsprintf.c b/linux/dev/lib/vsprintf.c
index 0beb7471..541ec650 100644
--- a/linux/dev/lib/vsprintf.c
+++ b/linux/dev/lib/vsprintf.c
@@ -328,9 +328,9 @@ linux_vsprintf (char *buf, const char *fmt, va_list args)
num = va_arg (args, unsigned long);
else if (qualifier == 'h')
if (flags & SIGN)
- num = va_arg (args, short);
+ num = (short) va_arg (args, int);
else
- num = va_arg (args, unsigned short);
+ num = (unsigned short) va_arg (args, unsigned int);
else if (flags & SIGN)
num = va_arg (args, int);
else
diff --git a/linux/pcmcia-cs/glue/wireless_glue.h b/linux/pcmcia-cs/glue/wireless_glue.h
index 05eef4ba..61006b4a 100644
--- a/linux/pcmcia-cs/glue/wireless_glue.h
+++ b/linux/pcmcia-cs/glue/wireless_glue.h
@@ -48,14 +48,7 @@
#endif
-/*
- * We compile everything directly into the GNU Mach kernel, there are no
- * modules.
- */
-#define SET_MODULE_OWNER(a) do{ } while(0)
-#define EXPORT_SYMBOL(a)
-
-
+#include <kern/debug.h>
/*
* We need some `schedule_task' replacement. This is defined in
diff --git a/linux/src/arch/i386/kernel/bios32.c b/linux/src/arch/i386/kernel/bios32.c
index 0b357be0..b069ce46 100644
--- a/linux/src/arch/i386/kernel/bios32.c
+++ b/linux/src/arch/i386/kernel/bios32.c
@@ -166,7 +166,7 @@ static unsigned long bios32_service(unsigned long service)
unsigned long flags;
save_flags(flags); cli();
- __asm__("lcall *(%%edi)"
+ __asm__("lcall *(%%edi); cld"
: "=a" (return_code),
"=b" (address),
"=c" (length),
@@ -206,10 +206,10 @@ static int check_pcibios(void)
int pack;
if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
- pci_indirect.address = pcibios_entry;
+ pci_indirect.address = phystokv(pcibios_entry);
save_flags(flags); cli();
- __asm__("lcall *(%%edi)\n\t"
+ __asm__("lcall *(%%edi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:\tshl $8, %%eax\n\t"
@@ -254,7 +254,7 @@ static int pci_bios_find_class (unsigned int class_code, unsigned short index,
unsigned long flags;
save_flags(flags); cli();
- __asm__ ("lcall *(%%edi)\n\t"
+ __asm__ ("lcall *(%%edi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
@@ -279,7 +279,7 @@ static int pci_bios_find_device (unsigned short vendor, unsigned short device_id
unsigned long flags;
save_flags(flags); cli();
- __asm__("lcall *(%%edi)\n\t"
+ __asm__("lcall *(%%edi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
@@ -304,7 +304,7 @@ static int pci_bios_read_config_byte(unsigned char bus,
unsigned long flags;
save_flags(flags); cli();
- __asm__("lcall *(%%esi)\n\t"
+ __asm__("lcall *(%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
@@ -326,7 +326,7 @@ static int pci_bios_read_config_word (unsigned char bus,
unsigned long flags;
save_flags(flags); cli();
- __asm__("lcall *(%%esi)\n\t"
+ __asm__("lcall *(%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
@@ -348,7 +348,7 @@ static int pci_bios_read_config_dword (unsigned char bus,
unsigned long flags;
save_flags(flags); cli();
- __asm__("lcall *(%%esi)\n\t"
+ __asm__("lcall *(%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
@@ -370,7 +370,7 @@ static int pci_bios_write_config_byte (unsigned char bus,
unsigned long flags;
save_flags(flags); cli();
- __asm__("lcall *(%%esi)\n\t"
+ __asm__("lcall *(%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
@@ -392,7 +392,7 @@ static int pci_bios_write_config_word (unsigned char bus,
unsigned long flags;
save_flags(flags); cli();
- __asm__("lcall *(%%esi)\n\t"
+ __asm__("lcall *(%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
@@ -414,7 +414,7 @@ static int pci_bios_write_config_dword (unsigned char bus,
unsigned long flags;
save_flags(flags); cli();
- __asm__("lcall *(%%esi)\n\t"
+ __asm__("lcall *(%%esi); cld\n\t"
"jc 1f\n\t"
"xor %%ah, %%ah\n"
"1:"
@@ -903,7 +903,7 @@ unsigned long pcibios_init(unsigned long memory_start, unsigned long memory_end)
} else {
bios32_entry = check->fields.entry;
printk ("pcibios_init : BIOS32 Service Directory entry at 0x%lx\n", bios32_entry);
- bios32_indirect.address = bios32_entry;
+ bios32_indirect.address = phystokv(bios32_entry);
}
}
}
diff --git a/linux/src/drivers/block/floppy.c b/linux/src/drivers/block/floppy.c
index 0314a0b7..1b96c44f 100644
--- a/linux/src/drivers/block/floppy.c
+++ b/linux/src/drivers/block/floppy.c
@@ -177,6 +177,8 @@ static inline int __get_order(unsigned long size);
#include <linux/blk.h>
#include <linux/cdrom.h> /* for the compatibility eject ioctl */
+#include <linux/dev/glue/glue.h>
+
#ifndef FLOPPY_MOTOR_MASK
#define FLOPPY_MOTOR_MASK 0xf0
@@ -4167,8 +4169,6 @@ static void floppy_release_irq_and_dma(void)
#ifdef MODULE
-extern char *get_options(char *str, int *ints);
-
char *floppy=NULL;
static void parse_floppy_cfg_string(char *cfg)
diff --git a/linux/src/drivers/block/ide-cd.c b/linux/src/drivers/block/ide-cd.c
index 17f60b94..e4548f54 100644
--- a/linux/src/drivers/block/ide-cd.c
+++ b/linux/src/drivers/block/ide-cd.c
@@ -153,6 +153,10 @@
/***************************************************************************/
+#ifdef MACH
+#include <kern/sched_prim.h>
+#endif
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/delay.h>
@@ -626,7 +630,7 @@ static void cdrom_queue_request_sense (ide_drive_t *drive,
pc->c[0] = REQUEST_SENSE;
pc->c[4] = len;
- pc->buffer = (char *)reqbuf;
+ pc->buffer = (unsigned char *)reqbuf;
pc->buflen = len;
pc->sense_data = (struct atapi_request_sense *)failed_command;
@@ -844,7 +848,7 @@ static int cdrom_start_packet_command (ide_drive_t *drive, int xferlen,
HANDLER is the interrupt handler to call when the command completes
or there's data ready. */
static int cdrom_transfer_packet_command (ide_drive_t *drive,
- char *cmd_buf, int cmd_len,
+ unsigned char *cmd_buf, int cmd_len,
ide_handler_t *handler)
{
if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
@@ -1379,10 +1383,19 @@ static void cdrom_do_packet_command (ide_drive_t *drive)
cdrom_start_packet_command (drive, len, cdrom_do_pc_continuation);
}
-
-#ifndef MACH
/* Sleep for TIME jiffies.
Not to be called from an interrupt handler. */
+#ifdef MACH
+static
+void cdrom_sleep (int time)
+{
+ int xxx;
+
+ assert_wait ((event_t) &xxx, TRUE);
+ thread_set_timeout (time);
+ schedule ();
+}
+#else
static
void cdrom_sleep (int time)
{
@@ -1660,7 +1673,7 @@ cdrom_read_capacity (ide_drive_t *drive, unsigned *capacity,
pc.sense_data = reqbuf;
pc.c[0] = READ_CAPACITY;
- pc.buffer = (char *)&capbuf;
+ pc.buffer = (unsigned char *)&capbuf;
pc.buflen = sizeof (capbuf);
stat = cdrom_queue_packet_command (drive, &pc);
@@ -1681,7 +1694,7 @@ cdrom_read_tocentry (ide_drive_t *drive, int trackno, int msf_flag,
memset (&pc, 0, sizeof (pc));
pc.sense_data = reqbuf;
- pc.buffer = buf;
+ pc.buffer = (unsigned char *)buf;
pc.buflen = buflen;
pc.c[0] = SCMD_READ_TOC;
pc.c[6] = trackno;
@@ -1813,7 +1826,7 @@ cdrom_read_subchannel (ide_drive_t *drive, int format,
memset (&pc, 0, sizeof (pc));
pc.sense_data = reqbuf;
- pc.buffer = buf;
+ pc.buffer = (unsigned char *) buf;
pc.buflen = buflen;
pc.c[0] = SCMD_READ_SUBCHANNEL;
pc.c[1] = 2; /* MSF addressing */
@@ -1836,7 +1849,7 @@ cdrom_mode_sense (ide_drive_t *drive, int pageno, int modeflag,
memset (&pc, 0, sizeof (pc));
pc.sense_data = reqbuf;
- pc.buffer = buf;
+ pc.buffer = (unsigned char *)buf;
pc.buflen = buflen;
pc.c[0] = MODE_SENSE_10;
pc.c[2] = pageno | (modeflag << 6);
@@ -1855,7 +1868,7 @@ cdrom_mode_select (ide_drive_t *drive, int pageno, char *buf, int buflen,
memset (&pc, 0, sizeof (pc));
pc.sense_data = reqbuf;
- pc.buffer = buf;
+ pc.buffer = (unsigned char *)buf;
pc.buflen = - buflen;
pc.c[0] = MODE_SELECT_10;
pc.c[1] = 0x10;
@@ -1971,7 +1984,7 @@ cdrom_read_block (ide_drive_t *drive, int format, int lba, int nblocks,
memset (&pc, 0, sizeof (pc));
pc.sense_data = reqbuf;
- pc.buffer = buf;
+ pc.buffer = (unsigned char *)buf;
pc.buflen = buflen;
#if ! STANDARD_ATAPI
@@ -2698,9 +2711,12 @@ void ide_cdrom_setup (ide_drive_t *drive)
CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd = 0;
if (drive->id != NULL) {
- if (strcmp (drive->id->model, "V003S0DS") == 0 &&
- drive->id->fw_rev[4] == '1' &&
- drive->id->fw_rev[6] <= '2') {
+ const char *model = (const char *)drive->id->model;
+ const char *fw_rev = (const char *)drive->id->fw_rev;
+
+ if (strcmp (model, "V003S0DS") == 0 &&
+ fw_rev[4] == '1' &&
+ fw_rev[6] <= '2') {
/* Vertos 300.
Some versions of this drive like to talk BCD. */
CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd = 1;
@@ -2709,27 +2725,27 @@ void ide_cdrom_setup (ide_drive_t *drive)
CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd = 1;
}
- else if (strcmp (drive->id->model, "V006E0DS") == 0 &&
- drive->id->fw_rev[4] == '1' &&
- drive->id->fw_rev[6] <= '2') {
+ else if (strcmp (model, "V006E0DS") == 0 &&
+ fw_rev[4] == '1' &&
+ fw_rev[6] <= '2') {
/* Vertos 600 ESD. */
CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd = 1;
}
- else if (strcmp (drive->id->model, "GCD-R580B") == 0)
+ else if (strcmp (model, "GCD-R580B") == 0)
drive->cdrom_info.max_sectors = 124;
- else if (strcmp (drive->id->model,
+ else if (strcmp (model,
"NEC CD-ROM DRIVE:260") == 0 &&
- strcmp (drive->id->fw_rev, "1.01") == 0) {
+ strcmp (fw_rev, "1.01") == 0) {
/* Old NEC260 (not R). */
CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd = 1;
CDROM_CONFIG_FLAGS (drive)->playmsf_as_bcd = 1;
CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd = 1;
}
- else if (strcmp (drive->id->model, "WEARNES CDD-120") == 0 &&
- strcmp (drive->id->fw_rev, "A1.1") == 0) {
+ else if (strcmp (model, "WEARNES CDD-120") == 0 &&
+ strcmp (fw_rev, "A1.1") == 0) {
/* Wearnes */
CDROM_CONFIG_FLAGS (drive)->playmsf_as_bcd = 1;
CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd = 1;
@@ -2737,9 +2753,9 @@ void ide_cdrom_setup (ide_drive_t *drive)
/* Sanyo 3 CD changer uses a non-standard command
for CD changing */
- else if ((strcmp(drive->id->model, "CD-ROM CDR-C3 G") == 0) ||
- (strcmp(drive->id->model, "CD-ROM CDR-C3G") == 0) ||
- (strcmp(drive->id->model, "CD-ROM CDR_C36") == 0)) {
+ else if ((strcmp(model, "CD-ROM CDR-C3 G") == 0) ||
+ (strcmp(model, "CD-ROM CDR-C3G") == 0) ||
+ (strcmp(model, "CD-ROM CDR_C36") == 0)) {
/* uses CD in slot 0 when value is set to 3 */
CDROM_STATE_FLAGS (drive)->sanyo_slot = 3;
}
diff --git a/linux/src/drivers/block/ide.c b/linux/src/drivers/block/ide.c
index 18f2e763..7ab790d4 100644
--- a/linux/src/drivers/block/ide.c
+++ b/linux/src/drivers/block/ide.c
@@ -604,6 +604,9 @@ void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int t
*
* Returns: 1 if lba_capacity looks sensible
* 0 otherwise
+ *
+ * Note: we must not change id->cyls here, otherwise a second call
+ * of this routine might no longer find lba_capacity ok.
*/
static int lba_capacity_is_ok (struct hd_driveid *id)
{
@@ -611,10 +614,16 @@ static int lba_capacity_is_ok (struct hd_driveid *id)
unsigned long chs_sects = id->cyls * id->heads * id->sectors;
unsigned long _10_percent = chs_sects / 10;
- /* very large drives (8GB+) may lie about the number of cylinders */
- if (id->cyls == 16383 && id->heads == 16 && id->sectors == 63 && lba_sects > chs_sects) {
+ /*
+ * The ATA spec tells large drives to return
+ * C/H/S = 16383/16/63 independent of their size.
+ * Some drives can be jumpered to use 15 heads instead of 16.
+ */
+ if (id->cyls == 16383 && id->sectors == 63 &&
+ (id->heads == 15 || id->heads == 16) &&
+ id->lba_capacity >= 16383*63*id->heads)
return 1; /* lba_capacity is our only option */
- }
+
/* perform a rough sanity check on lba_sects: within 10% is "okay" */
if ((lba_sects - chs_sects) < _10_percent)
return 1; /* lba_capacity is good */
@@ -631,11 +640,13 @@ static int lba_capacity_is_ok (struct hd_driveid *id)
/*
* current_capacity() returns the capacity (in sectors) of a drive
* according to its current geometry/LBA settings.
+ *
+ * It also sets select.b.lba.
*/
static unsigned long current_capacity (ide_drive_t *drive)
{
struct hd_driveid *id = drive->id;
- unsigned long capacity = drive->cyl * drive->head * drive->sect;
+ unsigned long capacity;
if (!drive->present)
return 0;
@@ -645,8 +656,10 @@ static unsigned long current_capacity (ide_drive_t *drive)
#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
if (drive->media != ide_disk)
return 0x7fffffff; /* cdrom or tape */
+
drive->select.b.lba = 0;
/* Determine capacity, and use LBA if the drive properly supports it */
+ capacity = drive->cyl * drive->head * drive->sect;
if (id != NULL && (id->capability & 2) && lba_capacity_is_ok(id)) {
if (id->lba_capacity >= capacity) {
capacity = id->lba_capacity;
@@ -1525,7 +1538,7 @@ static inline void do_rw_disk (ide_drive_t *drive, struct request *rq, unsigned
*/
static void execute_drive_cmd (ide_drive_t *drive, struct request *rq)
{
- byte *args = rq->buffer;
+ byte *args = (byte *)rq->buffer;
if (args) {
#ifdef DEBUG
printk("%s: DRIVE_CMD cmd=0x%02x sc=0x%02x fr=0x%02x xx=0x%02x\n",
@@ -2020,7 +2033,7 @@ static int ide_open(struct inode * inode, struct file * filp)
struct request rq;
check_disk_change(inode->i_rdev);
ide_init_drive_cmd (&rq);
- rq.buffer = door_lock;
+ rq.buffer = (char *)door_lock;
/*
* Ignore the return code from door_lock,
* since the open() has already succeeded,
@@ -2071,7 +2084,7 @@ static void ide_release(struct inode * inode, struct file * file)
struct request rq;
invalidate_buffers(inode->i_rdev);
ide_init_drive_cmd (&rq);
- rq.buffer = door_unlock;
+ rq.buffer = (char *)door_unlock;
(void) ide_do_drive_cmd(drive, &rq, ide_wait);
}
}
@@ -2321,7 +2334,7 @@ static int ide_ioctl (struct inode *inode, struct file *file,
argbuf[3] = args[3];
}
if (!(err = verify_area(VERIFY_WRITE,(void *)arg, argsize))) {
- rq.buffer = argbuf;
+ rq.buffer = (char *)argbuf;
err = ide_do_drive_cmd(drive, &rq, ide_wait);
memcpy_tofs((void *)arg, argbuf, argsize);
}
@@ -2455,7 +2468,7 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
ide_fixstring (id->fw_rev, sizeof(id->fw_rev), bswap);
ide_fixstring (id->serial_no, sizeof(id->serial_no), bswap);
- if (strstr(id->model, "E X A B Y T E N E S T"))
+ if (strstr((char *)id->model, "E X A B Y T E N E S T"))
return;
#ifdef CONFIG_BLK_DEV_IDEATAPI
@@ -2474,9 +2487,12 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
#endif /* CONFIG_BLK_DEV_PROMISE */
if (!drive->ide_scsi) switch (type) {
case 0:
- if (!strstr(id->model, "oppy") && !strstr(id->model, "poyp") && !strstr(id->model, "ZIP"))
+ if (!strstr((char *)id->model, "oppy") &&
+ !strstr((char *)id->model, "poyp") &&
+ !strstr((char *)id->model, "ZIP"))
printk("cdrom or floppy?, assuming ");
- if (drive->media != ide_cdrom && !strstr(id->model, "CD-ROM")) {
+ if (drive->media != ide_cdrom &&
+ !strstr((char *)id->model, "CD-ROM")) {
#ifdef CONFIG_BLK_DEV_IDEFLOPPY
printk("FLOPPY drive\n");
drive->media = ide_floppy;
@@ -2565,8 +2581,7 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
}
/* Handle logical geometry translation by the drive */
if ((id->field_valid & 1) && id->cur_cyls && id->cur_heads
- && (id->cur_heads <= 16) && id->cur_sectors)
- {
+ && (id->cur_heads <= 16) && id->cur_sectors) {
/*
* Extract the physical drive geometry for our use.
* Note that we purposely do *not* update the bios info.
@@ -2591,7 +2606,8 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
}
}
/* Use physical geometry if what we have still makes no sense */
- if ((!drive->head || drive->head > 16) && id->heads && id->heads <= 16) {
+ if ((!drive->head || drive->head > 16) &&
+ id->heads && id->heads <= 16) {
drive->cyl = id->cyls;
drive->head = id->heads;
drive->sect = id->sectors;
@@ -2600,24 +2616,25 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
/* calculate drive capacity, and select LBA if possible */
capacity = current_capacity (drive);
- /* Correct the number of cyls if the bios value is too small */
- if (!drive->forced_geom &&
- capacity > drive->bios_cyl * drive->bios_sect * drive->bios_head) {
- unsigned long cylsize;
- cylsize = drive->bios_sect * drive->bios_head;
- if (cylsize == 0 || capacity/cylsize > 65535) {
- drive->bios_sect = 63;
- drive->bios_head = 255;
- cylsize = 63*255;
- }
- if (capacity/cylsize > 65535)
- drive->bios_cyl = 65535;
- else
- drive->bios_cyl = capacity/cylsize;
- }
-
- if (!strncmp(id->model, "BMI ", 4) &&
- strstr(id->model, " ENHANCED IDE ") &&
+ /*
+ * if possible, give fdisk access to more of the drive,
+ * by correcting bios_cyls:
+ */
+ if (capacity > drive->bios_cyl * drive->bios_head * drive->bios_sect
+ && !drive->forced_geom && drive->bios_sect && drive->bios_head) {
+ int cyl = (capacity / drive->bios_sect) / drive->bios_head;
+ if (cyl <= 65535)
+ drive->bios_cyl = cyl;
+ else {
+ /* OK until 539 GB */
+ drive->bios_sect = 63;
+ drive->bios_head = 255;
+ drive->bios_cyl = capacity / (63*255);
+ }
+ }
+
+ if (!strncmp((char *)id->model, "BMI ", 4) &&
+ strstr((char *)id->model, " ENHANCED IDE ") &&
drive->select.b.lba)
drive->no_geom = 1;
@@ -2856,7 +2873,7 @@ static inline byte probe_for_drive (ide_drive_t *drive)
(void) do_probe(drive, WIN_PIDENTIFY); /* look for ATAPI device */
#endif /* CONFIG_BLK_DEV_IDEATAPI */
}
- if (drive->id && strstr(drive->id->model, "E X A B Y T E N E S T"))
+ if (drive->id && strstr((char *)drive->id->model, "E X A B Y T E N E S T"))
enable_nest(drive);
if (!drive->present)
return 0; /* drive not found */
@@ -3343,7 +3360,7 @@ done:
* This routine is called from the partition-table code in genhd.c
* to "convert" a drive to a logical geometry with fewer than 1024 cyls.
*
- * The second parameter, "xparm", determines exactly how the translation
+ * The second parameter, "xparm", determines exactly how the translation
* will be handled:
* 0 = convert to CHS with fewer than 1024 cyls
* using the same method as Ontrack DiskManager.
@@ -3351,10 +3368,11 @@ done:
* -1 = similar to "0", plus redirect sector 0 to sector 1.
* >1 = convert to a CHS geometry with "xparm" heads.
*
- * Returns 0 if the translation was not possible, if the device was not
+ * Returns 0 if the translation was not possible, if the device was not
* an IDE disk drive, or if a geometry was "forced" on the commandline.
* Returns 1 if the geometry translation was successful.
*/
+
int ide_xlate_1024 (kdev_t i_rdev, int xparm, const char *msg)
{
ide_drive_t *drive;
@@ -3362,7 +3380,11 @@ int ide_xlate_1024 (kdev_t i_rdev, int xparm, const char *msg)
const byte *heads = head_vals;
unsigned long tracks;
- if ((drive = get_info_ptr(i_rdev)) == NULL || drive->forced_geom)
+ drive = get_info_ptr(i_rdev);
+ if (!drive)
+ return 0;
+
+ if (drive->forced_geom)
return 0;
if (xparm > 1 && xparm <= drive->bios_head && drive->bios_sect == 63)
@@ -3370,6 +3392,10 @@ int ide_xlate_1024 (kdev_t i_rdev, int xparm, const char *msg)
printk("%s ", msg);
+ if (xparm < 0 && (drive->bios_cyl * drive->bios_head * drive->bios_sect) < (1024 * 16 * 63)) {
+ return 0; /* small disk: no translation needed */
+ }
+
if (drive->id) {
drive->cyl = drive->id->cyls;
drive->head = drive->id->heads;
@@ -3608,9 +3634,6 @@ static void ide_probe_promise_20246(void)
hwif->ctl_port = io[tmp + 1] + 2;
hwif->noprobe = 0;
}
-#ifdef CONFIG_BLK_DEV_TRITON
- ide_init_promise (bus, fn, &ide_hwifs[2], &ide_hwifs[3], io[4]);
-#endif /* CONFIG_BLK_DEV_TRITON */
}
#endif /* CONFIG_PCI */
@@ -3643,6 +3666,9 @@ static void probe_for_hwifs (void)
ide_probe_pci (PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371_0, &ide_init_triton, 1);
ide_probe_pci (PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_1, &ide_init_triton, 0);
ide_probe_pci (PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB, &ide_init_triton, 0);
+ ide_probe_pci (PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, &ide_init_triton, 0);
+ ide_probe_pci (PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, &ide_init_triton, 0);
+ ide_probe_pci (PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5229, &ide_init_triton, 0);
#endif /* CONFIG_BLK_DEV_TRITON */
ide_probe_promise_20246();
}
diff --git a/linux/src/drivers/block/ide.h b/linux/src/drivers/block/ide.h
index 44331607..edeedc97 100644
--- a/linux/src/drivers/block/ide.h
+++ b/linux/src/drivers/block/ide.h
@@ -152,7 +152,8 @@ typedef unsigned char byte; /* used everywhere */
/*
* Timeouts for various operations:
*/
-#define WAIT_DRQ (5*HZ/100) /* 50msec - spec allows up to 20ms */
+#define WAIT_DRQ (1*HZ) /* 1s - spec allows up to 20ms, but CF
+ * cards and SSD drives need more */
#ifdef CONFIG_APM
#define WAIT_READY (5*HZ) /* 5sec - some laptops are very slow */
#else
@@ -198,7 +199,7 @@ struct atapi_request_sense {
};
struct packet_command {
- char *buffer;
+ unsigned char *buffer;
int buflen;
int stat;
struct atapi_request_sense *sense_data;
@@ -429,7 +430,8 @@ typedef void (ide_selectproc_t) (ide_drive_t *);
typedef enum { ide_unknown, ide_generic, ide_triton,
ide_cmd640, ide_dtc2278, ide_ali14xx,
ide_qd6580, ide_umc8672, ide_ht6560b,
- ide_promise, ide_promise_udma }
+ ide_promise, ide_hpt343, ide_udma,
+ ide_ultra66 }
hwif_chipset_t;
typedef struct hwif_s {
diff --git a/linux/src/drivers/block/triton.c b/linux/src/drivers/block/triton.c
index 3ed069d9..37eff2b3 100644
--- a/linux/src/drivers/block/triton.c
+++ b/linux/src/drivers/block/triton.c
@@ -1,107 +1,20 @@
/*
* linux/drivers/block/triton.c Version 1.13 Aug 12, 1996
+ * Version 1.13a June 1998 - new chipsets
+ * Version 1.13b July 1998 - DMA blacklist
+ * Version 1.14 June 22, 1999
*
+ * Copyright (c) 1998-1999 Andre Hedrick
* Copyright (c) 1995-1996 Mark Lord
* May be copied or modified under the terms of the GNU General Public License
*/
/*
- * This module provides support for the Bus Master IDE DMA function
- * of the Intel PCI Triton I/II chipsets (i82371FB or i82371SB).
- *
- * Pretty much the same code will work for the OPTi "Viper" chipset.
- * Look for DMA support for this in linux kernel 2.1.xx, when it appears.
- *
- * DMA is currently supported only for hard disk drives (not cdroms).
- *
- * Support for cdroms will likely be added at a later date,
- * after broader experience has been obtained with hard disks.
- *
- * Up to four drives may be enabled for DMA, and the Triton chipset will
- * (hopefully) arbitrate the PCI bus among them. Note that the i82371 chip
- * provides a single "line buffer" for the BM IDE function, so performance of
- * multiple (two) drives doing DMA simultaneously will suffer somewhat,
- * as they contest for that resource bottleneck. This is handled transparently
- * inside the i82371 chip.
- *
- * By default, DMA support is prepared for use, but is currently enabled only
- * for drives which support multi-word DMA mode2 (mword2), or which are
- * recognized as "good" (see table below). Drives with only mode0 or mode1
- * (single or multi) DMA should also work with this chipset/driver (eg. MC2112A)
- * but are not enabled by default. Use "hdparm -i" to view modes supported
- * by a given drive.
- *
- * The hdparm-2.4 (or later) utility can be used for manually enabling/disabling
- * DMA support, but must be (re-)compiled against this kernel version or later.
- *
- * To enable DMA, use "hdparm -d1 /dev/hd?" on a per-drive basis after booting.
- * If problems arise, ide.c will disable DMA operation after a few retries.
- * This error recovery mechanism works and has been extremely well exercised.
- *
- * IDE drives, depending on their vintage, may support several different modes
- * of DMA operation. The boot-time modes are indicated with a "*" in
- * the "hdparm -i" listing, and can be changed with *knowledgeable* use of
- * the "hdparm -X" feature. There is seldom a need to do this, as drives
- * normally power-up with their "best" PIO/DMA modes enabled.
- *
- * Testing was done with an ASUS P55TP4XE/100 system and the following drives:
- *
- * Quantum Fireball 1080A (1Gig w/83kB buffer), DMA mode2, PIO mode4.
- * - DMA mode2 works well (7.4MB/sec), despite the tiny on-drive buffer.
- * - This drive also does PIO mode4, at about the same speed as DMA mode2.
- * An awesome drive for the price!
- *
- * Fujitsu M1606TA (1Gig w/256kB buffer), DMA mode2, PIO mode4.
- * - DMA mode2 gives horrible performance (1.6MB/sec), despite the good
- * size of the on-drive buffer and a boasted 10ms average access time.
- * - PIO mode4 was better, but peaked at a mere 4.5MB/sec.
- *
- * Micropolis MC2112A (1Gig w/508kB buffer), drive pre-dates EIDE and ATA2.
- * - DMA works fine (2.2MB/sec), probably due to the large on-drive buffer.
- * - This older drive can also be tweaked for fastPIO (3.7MB/sec) by using
- * maximum clock settings (5,4) and setting all flags except prefetch.
- *
- * Western Digital AC31000H (1Gig w/128kB buffer), DMA mode1, PIO mode3.
- * - DMA does not work reliably. The drive appears to be somewhat tardy
- * in deasserting DMARQ at the end of a sector. This is evident in
- * the observation that WRITEs work most of the time, depending on
- * cache-buffer occupancy, but multi-sector reads seldom work.
- *
- * Testing was done with a Gigabyte GA-586 ATE system and the following drive:
- * (Uwe Bonnes - bon@elektron.ikp.physik.th-darmstadt.de)
- *
- * Western Digital AC31600H (1.6Gig w/128kB buffer), DMA mode2, PIO mode4.
- * - much better than its 1Gig cousin, this drive is reported to work
- * very well with DMA (7.3MB/sec).
- *
- * Other drives:
- *
- * Maxtor 7540AV (515Meg w/32kB buffer), DMA modes mword0/sword2, PIO mode3.
- * - a budget drive, with budget performance, around 3MB/sec.
- *
- * Western Digital AC2850F (814Meg w/64kB buffer), DMA mode1, PIO mode3.
- * - another "caviar" drive, similar to the AC31000, except that this one
- * worked with DMA in at least one system. Throughput is about 3.8MB/sec
- * for both DMA and PIO.
- *
- * Conner CFS850A (812Meg w/64kB buffer), DMA mode2, PIO mode4.
- * - like most Conner models, this drive proves that even a fast interface
- * cannot improve slow media. Both DMA and PIO peak around 3.5MB/sec.
- *
- * Maxtor 71260AT (1204Meg w/256kB buffer), DMA mword0/sword2, PIO mode3.
- * - works with DMA, on some systems (but not always on others, eg. Dell),
- * giving 3-4MB/sec performance, about the same as mode3.
- *
- * If you have any drive models to add, email your results to: mlord@pobox.com
- * Keep an eye on /var/adm/messages for "DMA disabled" messages.
- *
- * Some people have reported trouble with Intel Zappa motherboards.
- * This can be fixed by upgrading the AMI BIOS to version 1.00.04.BS0,
- * available from ftp://ftp.intel.com/pub/bios/10004bs0.exe
- * (thanks to Glen Morrell <glen@spin.Stanford.edu> for researching this).
- *
- * And, yes, Intel Zappa boards really *do* use the Triton IDE ports.
+ * This module provides support for Bus Master IDE DMA functions in various
+ * motherboard chipsets and PCI controller cards.
+ * Please check /Documentation/ide.txt and /Documentation/udma.txt for details.
*/
+
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -116,10 +29,24 @@
#include <asm/io.h>
#include <asm/dma.h>
+#include <asm/irq.h>
#include "ide.h"
#undef DISPLAY_TRITON_TIMINGS /* define this to display timings */
+#undef DISPLAY_APOLLO_TIMINGS /* define this for extensive debugging information */
+#undef DISPLAY_ALI15X3_TIMINGS /* define this for extensive debugging information */
+
+#if defined(CONFIG_PROC_FS)
+#include <linux/stat.h>
+#include <linux/proc_fs.h>
+#ifdef DISPLAY_APOLLO_TIMINGS
+#include <linux/via_ide_dma.h>
+#endif
+#ifdef DISPLAY_ALI15X3_TIMINGS
+#include <linux/ali_ide_dma.h>
+#endif
+#endif
/*
* good_dma_drives() lists the model names (from "hdparm -i")
@@ -133,6 +60,27 @@ const char *good_dma_drives[] = {"Micropolis 2112A",
NULL};
/*
+ * bad_dma_drives() lists the model names (from "hdparm -i")
+ * of drives which supposedly support (U)DMA but which are
+ * known to corrupt data with this interface under Linux.
+ *
+ * Note: the list was generated by statistical analysis of problem
+ * reports. It's not clear if there are problems with the drives,
+ * or with some combination of drive/controller or what.
+ *
+ * You can forcibly override this if you wish. This is the kernel
+ * 'Tread carefully' list.
+ *
+ * Finally see http://www.wdc.com/quality/err-rec.html if you have
+ * one of the listed drives.
+ */
+const char *bad_dma_drives[] = {"WDC AC11000H",
+ "WDC AC22100H",
+ "WDC AC32500H",
+ "WDC AC33100H",
+ NULL};
+
+/*
* Our Physical Region Descriptor (PRD) table should be large enough
* to handle the biggest I/O request we are likely to see. Since requests
* can have no more than 256 sectors, and since the typical blocksize is
@@ -151,6 +99,8 @@ const char *good_dma_drives[] = {"Micropolis 2112A",
#define PRD_BYTES 8
#define PRD_ENTRIES (PAGE_SIZE / (2 * PRD_BYTES))
#define DEFAULT_BMIBA 0xe800 /* in case BIOS did not init it */
+#define DEFAULT_BMCRBA 0xcc00 /* VIA's default value */
+#define DEFAULT_BMALIBA 0xd400 /* ALI's default value */
/*
* dma_intr() is the handler for disk read/write DMA interrupts
@@ -245,23 +195,57 @@ static int build_dmatable (ide_drive_t *drive)
return 1; /* let the PIO routines handle this weirdness */
}
+/*
+ * We will only enable drives with multi-word (mode2) (U)DMA capabilities,
+ * and ignore the very rare cases of drives that can only do single-word
+ * (modes 0 & 1) (U)DMA transfers. We also discard "blacklisted" hard disks.
+ */
static int config_drive_for_dma (ide_drive_t *drive)
{
+#ifndef CONFIG_BLK_DEV_FORCE_DMA
const char **list;
struct hd_driveid *id = drive->id;
+#endif
+
+#ifdef CONFIG_BLK_DEV_FORCE_DMA
+ drive->using_dma = 1;
+ return 0;
+#else
+ if (HWIF(drive)->chipset == ide_hpt343) {
+ drive->using_dma = 0; /* no DMA */
+ return 1; /* DMA disabled */
+ }
if (id && (id->capability & 1)) {
- /* Enable DMA on any drive that has UltraDMA (mode 0/1/2) enabled */
- if (id->field_valid & 4) /* UltraDMA */
- if ((id->dma_ultra & (id->dma_ultra >> 8) & 7)) {
+ /* Consult the list of known "bad" drives */
+ list = bad_dma_drives;
+ while (*list) {
+ if (!strcmp(*list++,id->model)) {
+ drive->using_dma = 0; /* no DMA */
+ printk("ide: Disabling DMA modes on %s drive (%s).\n", drive->name, id->model);
+ return 1; /* DMA disabled */
+ }
+ }
+ /* Enable DMA on any drive that has mode 4 or 2 UltraDMA enabled */
+ if (id->field_valid & 4) { /* UltraDMA */
+ /* Enable DMA on any drive that has mode 4 UltraDMA enabled */
+ if (((id->dma_ultra & 0x1010) == 0x1010) &&
+ (id->word93 & 0x2000) &&
+ (HWIF(drive)->chipset == ide_ultra66)) {
drive->using_dma = 1;
- return 0; /* dma enabled */
+ return 0; /* DMA enabled */
+ } else
+ /* Enable DMA on any drive that has mode 2 UltraDMA enabled */
+ if ((id->dma_ultra & 0x404) == 0x404) {
+ drive->using_dma = 1;
+ return 0; /* DMA enabled */
}
- /* Enable DMA on any drive that has mode2 DMA (multi or single) enabled */
+ }
+ /* Enable DMA on any drive that has mode2 DMA enabled */
if (id->field_valid & 2) /* regular DMA */
- if ((id->dma_mword & 0x404) == 0x404 || (id->dma_1word & 0x404) == 0x404) {
+ if ((id->dma_mword & 0x404) == 0x404) {
drive->using_dma = 1;
- return 0; /* dma enabled */
+ return 0; /* DMA enabled */
}
/* Consult the list of known "good" drives */
list = good_dma_drives;
@@ -273,6 +257,7 @@ static int config_drive_for_dma (ide_drive_t *drive)
}
}
return 1; /* DMA not enabled */
+#endif
}
/*
@@ -388,22 +373,260 @@ static void init_triton_dma (ide_hwif_t *hwif, unsigned short base)
}
/*
+ * Set VIA Chipset Timings for (U)DMA modes enabled.
+ */
+static int set_via_timings (byte bus, byte fn, byte post, byte flush)
+{
+ byte via_config = 0;
+ int rc = 0;
+
+ /* setting IDE read prefetch buffer and IDE post write buffer */
+ if ((rc = pcibios_read_config_byte(bus, fn, 0x41, &via_config)))
+ return (1);
+ if ((rc = pcibios_write_config_byte(bus, fn, 0x41, via_config | post)))
+ return (1);
+
+ /* setting Channel read and End-of-sector FIFO flush: */
+ if ((rc = pcibios_read_config_byte(bus, fn, 0x46, &via_config)))
+ return (1);
+ if ((rc = pcibios_write_config_byte(bus, fn, 0x46, via_config | flush)))
+ return (1);
+
+ return (0);
+}
+
+static int setup_aladdin (byte bus, byte fn)
+{
+ byte confreg0 = 0, confreg1 = 0, progif = 0;
+ int errors = 0;
+
+ if (pcibios_read_config_byte(bus, fn, 0x50, &confreg1))
+ goto veryspecialsettingserror;
+ if (!(confreg1 & 0x02))
+ if (pcibios_write_config_byte(bus, fn, 0x50, confreg1 | 0x02))
+ goto veryspecialsettingserror;
+
+ if (pcibios_read_config_byte(bus, fn, 0x09, &progif))
+ goto veryspecialsettingserror;
+ if (!(progif & 0x40)) {
+ /*
+ * The way to enable them is to set progif
+ * writable at 0x4Dh register, and set bit 6
+ * of progif to 1:
+ */
+ if (pcibios_read_config_byte(bus, fn, 0x4d, &confreg0))
+ goto veryspecialsettingserror;
+ if (confreg0 & 0x80)
+ if (pcibios_write_config_byte(bus, fn, 0x4d, confreg0 & ~0x80))
+ goto veryspecialsettingserror;
+ if (pcibios_write_config_byte(bus, fn, 0x09, progif | 0x40))
+ goto veryspecialsettingserror;
+ if (confreg0 & 0x80)
+ if (pcibios_write_config_byte(bus, fn, 0x4d, confreg0))
+ errors++;
+ }
+
+ if ((pcibios_read_config_byte(bus, fn, 0x09, &progif)) || (!(progif & 0x40)))
+ goto veryspecialsettingserror;
+
+ printk("ide: ALI15X3: enabled read of IDE channels state (en/dis-abled) %s.\n",
+ errors ? "with Error(s)" : "Succeeded" );
+ return 1;
+veryspecialsettingserror:
+ printk("ide: ALI15X3: impossible to enable read of IDE channels state (en/dis-abled)!\n");
+ return 0;
+}
+
+void set_promise_hpt343_extra (unsigned short device, unsigned int bmiba)
+{
+ switch(device) {
+ case PCI_DEVICE_ID_PROMISE_20246:
+ if(!check_region((bmiba+16), 16))
+ request_region((bmiba+16), 16, "PDC20246");
+ break;
+ case PCI_DEVICE_ID_PROMISE_20262:
+ if (!check_region((bmiba+48), 48))
+ request_region((bmiba+48), 48, "PDC20262");
+ break;
+ case PCI_DEVICE_ID_TTI_HPT343:
+ if(!check_region((bmiba+16), 16))
+ request_region((bmiba+16), 16, "HPT343");
+ break;
+ default:
+ break;
+ }
+}
+
+#define HPT343_PCI_INIT_REG 0x80
+
+/*
* ide_init_triton() prepares the IDE driver for DMA operation.
* This routine is called once, from ide.c during driver initialization,
- * for each triton chipset which is found (unlikely to be more than one).
+ * for each BM-DMA chipset which is found (rarely more than one).
*/
void ide_init_triton (byte bus, byte fn)
{
- int rc = 0, h;
- int dma_enabled = 0;
- unsigned short pcicmd;
- unsigned int bmiba, timings;
+ byte bridgebus, bridgefn, bridgeset = 0, hpt34x_flag = 0;
+ unsigned char irq = 0;
+ int dma_enabled = 0, rc = 0, h;
+ unsigned short io[6], count = 0, step_count = 0, pass_count = 0;
+ unsigned short pcicmd, vendor, device, class;
+ unsigned int bmiba, timings, reg, tmp;
+ unsigned int addressbios = 0;
+ unsigned long flags;
+ unsigned index;
+
+#if defined(DISPLAY_APOLLO_TIMINGS) || defined(DISPLAY_ALI15X3_TIMINGS)
+ bmide_bus = bus;
+ bmide_fn = fn;
+#endif /* DISPLAY_APOLLO_TIMINGS || DISPLAY_ALI15X3_TIMINGS */
+
+/*
+ * We pick up the vendor, device, and class info for selecting the correct
+ * controller that is supported. Since we can access this routine more than
+ * once with the use of onboard and off-board EIDE controllers, a method
+ * of determining "who is who for what" is needed.
+ */
+
+ pcibios_read_config_word (bus, fn, PCI_VENDOR_ID, &vendor);
+ pcibios_read_config_word (bus, fn, PCI_DEVICE_ID, &device);
+ pcibios_read_config_word (bus, fn, PCI_CLASS_DEVICE, &class);
+ pcibios_read_config_byte (bus, fn, PCI_INTERRUPT_LINE, &irq);
+
+ switch(vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ printk("ide: Intel 82371 ");
+ switch(device) {
+ case PCI_DEVICE_ID_INTEL_82371_0:
+ printk("PIIX (single FIFO) ");
+ break;
+ case PCI_DEVICE_ID_INTEL_82371SB_1:
+ printk("PIIX3 (dual FIFO) ");
+ break;
+ case PCI_DEVICE_ID_INTEL_82371AB:
+ printk("PIIX4 (dual FIFO) ");
+ break;
+ default:
+ printk(" (unknown) 0x%04x ", device);
+ break;
+ }
+ printk("DMA Bus Mastering IDE ");
+ break;
+ case PCI_VENDOR_ID_SI:
+ printk("ide: SiS 5513 (dual FIFO) DMA Bus Mastering IDE ");
+ break;
+ case PCI_VENDOR_ID_VIA:
+ printk("ide: VIA VT82C586B (split FIFO) UDMA Bus Mastering IDE ");
+ break;
+ case PCI_VENDOR_ID_TTI:
+ /*PCI_CLASS_STORAGE_UNKNOWN == class */
+ if (device == PCI_DEVICE_ID_TTI_HPT343) {
+ pcibios_write_config_byte(bus, fn, HPT343_PCI_INIT_REG, 0x00);
+ pcibios_read_config_word(bus, fn, PCI_COMMAND, &pcicmd);
+ hpt34x_flag = (pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0;
+#if 1
+ if (!hpt34x_flag) {
+ save_flags(flags);
+ cli();
+ pcibios_write_config_word(bus, fn, PCI_COMMAND, pcicmd & ~PCI_COMMAND_IO);
+ pcibios_read_config_dword(bus, fn, PCI_BASE_ADDRESS_4, &bmiba);
+ pcibios_write_config_dword(bus, fn, PCI_BASE_ADDRESS_0, bmiba | 0x20);
+ pcibios_write_config_dword(bus, fn, PCI_BASE_ADDRESS_1, bmiba | 0x34);
+ pcibios_write_config_dword(bus, fn, PCI_BASE_ADDRESS_2, bmiba | 0x28);
+ pcibios_write_config_dword(bus, fn, PCI_BASE_ADDRESS_3, bmiba | 0x3c);
+ pcibios_write_config_word(bus, fn, PCI_COMMAND, pcicmd);
+ bmiba = 0;
+ restore_flags(flags);
+ }
+#endif
+ pcibios_write_config_byte(bus, fn, PCI_LATENCY_TIMER, 0x20);
+ goto hpt343_jump_in;
+ } else {
+ printk("ide: HPTXXX did == 0x%04X unsupport chipset error.\n", device);
+ return;
+ }
+ case PCI_VENDOR_ID_PROMISE:
+ /*
+ * I have been able to make my Promise Ultra33 UDMA card change class.
+ * It has reported as both PCI_CLASS_STORAGE_RAID and PCI_CLASS_STORAGE_IDE.
+ * Since the PCI_CLASS_STORAGE_RAID mode should automatically mirror the
+ * two halves of the PCI_CONFIG register data, but sometimes it forgets.
+ * Thus we guarantee that they are identical, with a quick check and
+ * correction if needed.
+ * PDC20246 (primary) PDC20247 (secondary) IDE hwif's.
+ *
+ * PDC20262 Promise Ultra66 UDMA.
+ *
+ * Note that Promise "stories,fibs,..." about this device not being
+ * capable of ATAPI and AT devices.
+ */
+ if (class != PCI_CLASS_STORAGE_IDE) {
+ unsigned char irq_mirror = 0;
+
+ pcibios_read_config_byte(bus, fn, (PCI_INTERRUPT_LINE)|0x80, &irq_mirror);
+ if (irq != irq_mirror) {
+ pcibios_write_config_byte(bus, fn, (PCI_INTERRUPT_LINE)|0x80, irq);
+ }
+ }
+ case PCI_VENDOR_ID_ARTOP:
+ /* PCI_CLASS_STORAGE_SCSI == class */
+ /*
+ * I have found that by stroking rom_enable_bit on both the AEC6210U/UF and
+ * PDC20246 controller cards, the features desired are almost guaranteed
+ * to be enabled and compatible. This ROM may not be registered in the
+ * config data, but it can be turned on. Registration failure has only
+ * been observed if and only if Linux sets up the pci_io_address in the
+ * 0x6000 range. If they are setup in the 0xef00 range it is reported.
+ * WHY??? got me.........
+ */
+hpt343_jump_in:
+ printk("ide: %s UDMA Bus Mastering ",
+ (device == PCI_DEVICE_ID_ARTOP_ATP850UF) ? "AEC6210" :
+ (device == PCI_DEVICE_ID_PROMISE_20246) ? "PDC20246" :
+ (device == PCI_DEVICE_ID_PROMISE_20262) ? "PDC20262" :
+ (hpt34x_flag && (device == PCI_DEVICE_ID_TTI_HPT343)) ? "HPT345" :
+ (device == PCI_DEVICE_ID_TTI_HPT343) ? "HPT343" : "UNKNOWN");
+ pcibios_read_config_dword(bus, fn, PCI_ROM_ADDRESS, &addressbios);
+ if (addressbios) {
+ pcibios_write_config_byte(bus, fn, PCI_ROM_ADDRESS, addressbios | PCI_ROM_ADDRESS_ENABLE);
+ printk("with ROM enabled at 0x%08x", addressbios);
+ }
+ /*
+ * This was stripped out of 2.1.XXX kernel code and parts from a patch called
+ * promise_update. This finds the PCI_BASE_ADDRESS spaces and makes them
+ * available for configuration later.
+ * PCI_BASE_ADDRESS_0 hwif0->io_base
+ * PCI_BASE_ADDRESS_1 hwif0->ctl_port
+ * PCI_BASE_ADDRESS_2 hwif1->io_base
+ * PCI_BASE_ADDRESS_3 hwif1->ctl_port
+ * PCI_BASE_ADDRESS_4 bmiba
+ */
+ memset(io, 0, 6 * sizeof(unsigned short));
+ for (reg = PCI_BASE_ADDRESS_0; reg <= PCI_BASE_ADDRESS_5; reg += 4) {
+ pcibios_read_config_dword(bus, fn, reg, &tmp);
+ if (tmp & PCI_BASE_ADDRESS_SPACE_IO)
+ io[count++] = tmp & PCI_BASE_ADDRESS_IO_MASK;
+ }
+ break;
+ case PCI_VENDOR_ID_AL:
+ save_flags(flags);
+ cli();
+ for (index = 0; !pcibios_find_device (PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, index, &bridgebus, &bridgefn); ++index) {
+ bridgeset = setup_aladdin(bus, fn);
+ }
+ restore_flags(flags);
+ printk("ide: ALI15X3 (dual FIFO) DMA Bus Mastering IDE ");
+ break;
+ default:
+ return;
+ }
+
+ printk("\n Controller on PCI bus %d function %d\n", bus, fn);
- printk("ide: i82371 PIIX (Triton) on PCI bus %d function %d\n", bus, fn);
/*
* See if IDE and BM-DMA features are enabled:
*/
- if ((rc = pcibios_read_config_word(bus, fn, 0x04, &pcicmd)))
+ if ((rc = pcibios_read_config_word(bus, fn, PCI_COMMAND, &pcicmd)))
goto quit;
if ((pcicmd & 1) == 0) {
printk("ide: ports are not enabled (BIOS)\n");
@@ -427,21 +650,29 @@ void ide_init_triton (byte bus, byte fn)
*/
int try_again = 1;
do {
- if ((rc = pcibios_read_config_dword(bus, fn, 0x20, &bmiba)))
+ if ((rc = pcibios_read_config_dword(bus, fn, PCI_BASE_ADDRESS_4, &bmiba)))
goto quit;
bmiba &= 0xfff0; /* extract port base address */
if (bmiba) {
dma_enabled = 1;
break;
} else {
- printk("ide: BM-DMA base register is invalid (0x%04x, PnP BIOS problem)\n", bmiba);
- if (inb(DEFAULT_BMIBA) != 0xff || !try_again)
+ printk("ide: BM-DMA base register is invalid (0x%04x, PnP BIOS problem)\n", bmiba);
+ if (inb(((vendor == PCI_VENDOR_ID_AL) ? DEFAULT_BMALIBA :
+ (vendor == PCI_VENDOR_ID_VIA) ? DEFAULT_BMCRBA :
+ DEFAULT_BMIBA)) != 0xff || !try_again)
break;
- printk("ide: setting BM-DMA base register to 0x%04x\n", DEFAULT_BMIBA);
- if ((rc = pcibios_write_config_word(bus, fn, 0x04, pcicmd&~1)))
+ printk("ide: setting BM-DMA base register to 0x%04x\n",
+ ((vendor == PCI_VENDOR_ID_AL) ? DEFAULT_BMALIBA :
+ (vendor == PCI_VENDOR_ID_VIA) ? DEFAULT_BMCRBA :
+ DEFAULT_BMIBA));
+ if ((rc = pcibios_write_config_word(bus, fn, PCI_COMMAND, pcicmd&~1)))
goto quit;
- rc = pcibios_write_config_dword(bus, fn, 0x20, DEFAULT_BMIBA|1);
- if (pcibios_write_config_word(bus, fn, 0x04, pcicmd|5) || rc)
+ rc = pcibios_write_config_dword(bus, fn, 0x20,
+ ((vendor == PCI_VENDOR_ID_AL) ? DEFAULT_BMALIBA :
+ (vendor == PCI_VENDOR_ID_VIA) ? DEFAULT_BMCRBA :
+ DEFAULT_BMIBA)|1);
+ if (pcibios_write_config_word(bus, fn, PCI_COMMAND, pcicmd|5) || rc)
goto quit;
}
} while (try_again--);
@@ -450,89 +681,308 @@ void ide_init_triton (byte bus, byte fn)
/*
* See if ide port(s) are enabled
*/
- if ((rc = pcibios_read_config_dword(bus, fn, 0x40, &timings)))
- goto quit;
- if (!(timings & 0x80008000)) {
- printk("ide: neither port is enabled\n");
+ if ((rc = pcibios_read_config_dword(bus, fn,
+ (vendor == PCI_VENDOR_ID_PROMISE) ? 0x50 :
+ (vendor == PCI_VENDOR_ID_ARTOP) ? 0x54 :
+ (vendor == PCI_VENDOR_ID_SI) ? 0x48 :
+ (vendor == PCI_VENDOR_ID_AL) ? 0x08 :
+ 0x40, &timings)))
goto quit;
- }
+ /*
+ * We do a vendor check since the Ultra33/66 and AEC6210
+ * holds their timings in a different location.
+ */
+#if 0
+ printk("ide: timings == %08x\n", timings);
+#endif
+ /*
+ * The switch preserves some stuff that was original.
+ */
+ switch(vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ if (!(timings & 0x80008000)) {
+ printk("ide: INTEL: neither port is enabled\n");
+ goto quit;
+ }
+ break;
+ case PCI_VENDOR_ID_VIA:
+ if(!(timings & 0x03)) {
+ printk("ide: VIA: neither port is enabled\n");
+ goto quit;
+ }
+ break;
+ case PCI_VENDOR_ID_AL:
+ timings <<= 16;
+ timings >>= 24;
+ if (!(timings & 0x30)) {
+ printk("ide: ALI15X3: neither port is enabled\n");
+ goto quit;
+ }
+ break;
+ case PCI_VENDOR_ID_SI:
+ timings <<= 8;
+ timings >>= 24;
+ if (!(timings & 0x06)) {
+ printk("ide: SIS5513: neither port is enabled\n");
+ goto quit;
+ }
+ break;
+ case PCI_VENDOR_ID_PROMISE:
+ printk(" (U)DMA Burst Bit %sABLED " \
+ "Primary %s Mode " \
+ "Secondary %s Mode.\n",
+ (inb(bmiba + 0x001f) & 1) ? "EN" : "DIS",
+ (inb(bmiba + 0x001a) & 1) ? "MASTER" : "PCI",
+ (inb(bmiba + 0x001b) & 1) ? "MASTER" : "PCI" );
+#if 0
+ if (!(inb(bmiba + 0x001f) & 1)) {
+ outb(inb(bmiba + 0x001f)|0x01, (bmiba + 0x001f));
+ printk(" (U)DMA Burst Bit Forced %sABLED.\n",
+ (inb(bmiba + 0x001f) & 1) ? "EN" : "DIS");
+ }
+#endif
+ break;
+ case PCI_VENDOR_ID_ARTOP:
+ case PCI_VENDOR_ID_TTI:
+ default:
+ break;
+ }
/*
* Save the dma_base port addr for each interface
*/
for (h = 0; h < MAX_HWIFS; ++h) {
+ ide_hwif_t *hwif = &ide_hwifs[h];
+ byte channel = ((h == 1) || (h == 3) || (h == 5)) ? 1 : 0;
+
+ /*
+	 * This prevents the first controller from accidentally
+	 * initializing the hwif's that it does not use and block
+ * an off-board ide-pci from getting in the game.
+ */
+ if ((step_count >= 2) || (pass_count >= 2)) {
+ goto quit;
+ }
+
+#if 0
+ if (hwif->chipset == ide_unknown)
+ printk("ide: index == %d channel(%d)\n", h, channel);
+#endif
+
+#ifdef CONFIG_BLK_DEV_OFFBOARD
+ /*
+ * This is a forced override for the onboard ide controller
+ * to be enabled, if one chooses to have an offboard ide-pci
+ * card as the primary booting device. This beasty is
+ * for offboard UDMA upgrades with hard disks, but saving
+ * the onboard DMA2 controllers for CDROMS, TAPES, ZIPS, etc...
+ */
+ if (((vendor == PCI_VENDOR_ID_INTEL) ||
+ (vendor == PCI_VENDOR_ID_SI) ||
+ (vendor == PCI_VENDOR_ID_VIA) ||
+ (vendor == PCI_VENDOR_ID_AL)) && (h >= 2)) {
+ hwif->io_base = channel ? 0x170 : 0x1f0;
+ hwif->ctl_port = channel ? 0x376 : 0x3f6;
+ hwif->irq = channel ? 15 : 14;
+ hwif->noprobe = 0;
+ }
+#endif /* CONFIG_BLK_DEV_OFFBOARD */
+ /*
+ * If the chipset is listed as "ide_unknown", lets get a
+ * hwif while they last. This does the first check on
+ * the current availability of the ide_hwifs[h] in question.
+ */
+ if (hwif->chipset != ide_unknown) {
+ continue;
+ } else if (vendor == PCI_VENDOR_ID_INTEL) {
+ unsigned short time;
#ifdef DISPLAY_TRITON_TIMINGS
- byte s_clks, r_clks;
- unsigned short devid;
+ byte s_clks, r_clks;
+ unsigned short devid;
#endif /* DISPLAY_TRITON_TIMINGS */
- ide_hwif_t *hwif = &ide_hwifs[h];
- unsigned short time;
- if (hwif->io_base == 0x1f0) {
- time = timings & 0xffff;
- if ((time & 0x8000) == 0) /* interface enabled? */
- continue;
- hwif->chipset = ide_triton;
- if (dma_enabled)
- init_triton_dma(hwif, bmiba);
- } else if (hwif->io_base == 0x170) {
- time = timings >> 16;
- if ((time & 0x8000) == 0) /* interface enabled? */
+ pass_count++;
+ if (hwif->io_base == 0x1f0) {
+ time = timings & 0xffff;
+ if ((time & 0x8000) == 0) /* interface enabled? */
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ step_count++;
+ } else if (hwif->io_base == 0x170) {
+ time = timings >> 16;
+ if ((time & 0x8000) == 0) /* interface enabled? */
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ step_count++;
+ } else {
continue;
- hwif->chipset = ide_triton;
- if (dma_enabled)
- init_triton_dma(hwif, bmiba + 8);
- } else
- continue;
+ }
#ifdef DISPLAY_TRITON_TIMINGS
- s_clks = ((~time >> 12) & 3) + 2;
- r_clks = ((~time >> 8) & 3) + 1;
- printk(" %s timing: (0x%04x) sample_CLKs=%d, recovery_CLKs=%d\n",
- hwif->name, time, s_clks, r_clks);
- if ((time & 0x40) && !pcibios_read_config_word(bus, fn, 0x02, &devid)
- && devid == PCI_DEVICE_ID_INTEL_82371SB_1)
- {
- byte stime;
- if (pcibios_read_config_byte(bus, fn, 0x44, &stime)) {
- if (hwif->io_base == 0x1f0) {
- s_clks = ~stime >> 6;
- r_clks = ~stime >> 4;
+ s_clks = ((~time >> 12) & 3) + 2;
+ r_clks = ((~time >> 8) & 3) + 1;
+ printk(" %s timing: (0x%04x) sample_CLKs=%d, recovery_CLKs=%d\n",
+ hwif->name, time, s_clks, r_clks);
+ if ((time & 0x40) && !pcibios_read_config_word(bus, fn, PCI_DEVICE_ID, &devid)
+ && devid == PCI_DEVICE_ID_INTEL_82371SB_1) {
+ byte stime;
+ if (pcibios_read_config_byte(bus, fn, 0x44, &stime)) {
+ if (hwif->io_base == 0x1f0) {
+ s_clks = ~stime >> 6;
+ r_clks = ~stime >> 4;
+ } else {
+ s_clks = ~stime >> 2;
+ r_clks = ~stime;
+ }
+ s_clks = (s_clks & 3) + 2;
+ r_clks = (r_clks & 3) + 1;
+ printk(" slave: sample_CLKs=%d, recovery_CLKs=%d\n",
+ s_clks, r_clks);
+ }
+ }
+ print_triton_drive_flags (0, time & 0xf);
+ print_triton_drive_flags (1, (time >> 4) & 0xf);
+#endif /* DISPLAY_TRITON_TIMINGS */
+ } else if (vendor == PCI_VENDOR_ID_SI) {
+ pass_count++;
+ if (hwif->io_base == 0x1f0) {
+ if ((timings & 0x02) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ step_count++;
+ } else if (hwif->io_base == 0x170) {
+ if ((timings & 0x04) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ step_count++;
+ } else {
+ continue;
+ }
+ } else if (vendor == PCI_VENDOR_ID_VIA) {
+ pass_count++;
+ if (hwif->io_base == 0x1f0) {
+ if ((timings & 0x02) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ if (set_via_timings(bus, fn, 0xc0, 0xa0))
+ goto quit;
+#ifdef DISPLAY_APOLLO_TIMINGS
+ proc_register_dynamic(&proc_root, &via_proc_entry);
+#endif /* DISPLAY_APOLLO_TIMINGS */
+ step_count++;
+ } else if (hwif->io_base == 0x170) {
+ if ((timings & 0x01) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ if (set_via_timings(bus, fn, 0x30, 0x50))
+ goto quit;
+ step_count++;
+ } else {
+ continue;
+ }
+ } else if (vendor == PCI_VENDOR_ID_AL) {
+ byte ideic, inmir;
+ byte irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6,
+ 1, 11, 0, 12, 0, 14, 0, 15 };
+
+ if (bridgeset) {
+ pcibios_read_config_byte(bridgebus, bridgefn, 0x58, &ideic);
+ ideic = ideic & 0x03;
+ if ((channel && ideic == 0x03) || (!channel && !ideic)) {
+ pcibios_read_config_byte(bridgebus, bridgefn, 0x44, &inmir);
+ inmir = inmir & 0x0f;
+ hwif->irq = irq_routing_table[inmir];
+ } else if (channel && !(ideic & 0x01)) {
+ pcibios_read_config_byte(bridgebus, bridgefn, 0x75, &inmir);
+ inmir = inmir & 0x0f;
+ hwif->irq = irq_routing_table[inmir];
+ }
+ }
+ pass_count++;
+ if (hwif->io_base == 0x1f0) {
+ if ((timings & 0x20) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ outb(inb(bmiba+2) & 0x60, bmiba+2);
+ if (inb(bmiba+2) & 0x80)
+ printk("ALI15X3: simplex device: DMA forced\n");
+#ifdef DISPLAY_ALI15X3_TIMINGS
+ proc_register_dynamic(&proc_root, &ali_proc_entry);
+#endif /* DISPLAY_ALI15X3_TIMINGS */
+ step_count++;
+ } else if (hwif->io_base == 0x170) {
+ if ((timings & 0x10) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ outb(inb(bmiba+10) & 0x60, bmiba+10);
+ if (inb(bmiba+10) & 0x80)
+ printk("ALI15X3: simplex device: DMA forced\n");
+ step_count++;
+ } else {
+ continue;
+ }
+ } else if ((vendor == PCI_VENDOR_ID_PROMISE) ||
+ (vendor == PCI_VENDOR_ID_ARTOP) ||
+ (vendor == PCI_VENDOR_ID_TTI)) {
+ pass_count++;
+ if (vendor == PCI_VENDOR_ID_TTI) {
+ if ((!hpt34x_flag) && (h < 2)) {
+ goto quit;
+ } else if (hpt34x_flag) {
+ hwif->io_base = channel ? (bmiba + 0x28) : (bmiba + 0x20);
+ hwif->ctl_port = channel ? (bmiba + 0x3e) : (bmiba + 0x36);
+ } else {
+ goto io_temps;
+ }
+ } else {
+io_temps:
+ tmp = channel ? 2 : 0;
+ hwif->io_base = io[tmp];
+ hwif->ctl_port = io[tmp + 1] + 2;
+ }
+ hwif->irq = irq;
+ hwif->noprobe = 0;
+
+ if (device == PCI_DEVICE_ID_ARTOP_ATP850UF) {
+ hwif->serialized = 1;
+ }
+
+ if ((vendor == PCI_VENDOR_ID_PROMISE) ||
+ (vendor == PCI_VENDOR_ID_TTI)) {
+ set_promise_hpt343_extra(device, bmiba);
+ }
+
+ if (dma_enabled) {
+ if ((!check_region(bmiba, 8)) && (!channel)) {
+ hwif->chipset = ((vendor == PCI_VENDOR_ID_TTI) && !hpt34x_flag) ? ide_hpt343 :
+ (device == PCI_DEVICE_ID_PROMISE_20262) ? ide_ultra66 : ide_udma;
+ init_triton_dma(hwif, bmiba);
+ step_count++;
+ } else if ((!check_region((bmiba + 0x08), 8)) && (channel)) {
+ hwif->chipset = ((vendor == PCI_VENDOR_ID_TTI) && !hpt34x_flag) ? ide_hpt343 :
+ (device == PCI_DEVICE_ID_PROMISE_20262) ? ide_ultra66 : ide_udma;
+ init_triton_dma(hwif, bmiba + 8);
+ step_count++;
} else {
- s_clks = ~stime >> 2;
- r_clks = ~stime;
+ continue;
}
- s_clks = (s_clks & 3) + 2;
- r_clks = (r_clks & 3) + 1;
- printk(" slave: sample_CLKs=%d, recovery_CLKs=%d\n",
- s_clks, r_clks);
}
}
- print_triton_drive_flags (0, time & 0xf);
- print_triton_drive_flags (1, (time >> 4) & 0xf);
-#endif /* DISPLAY_TRITON_TIMINGS */
}
-quit: if (rc) printk("ide: pcibios access failed - %s\n", pcibios_strerror(rc));
-}
-
-void ide_init_promise (byte bus, byte fn, ide_hwif_t *hwif0, ide_hwif_t *hwif1, unsigned short dma)
-{
- int rc;
- unsigned short pcicmd;
- unsigned int bmiba = 0;
-
- printk("ide: Enabling DMA for Promise Technology IDE Ultra-DMA 33 on PCI bus %d function %d, port 0x%04x\n", bus, fn, dma);
- if ((rc = pcibios_read_config_word(bus, fn, 0x04, &pcicmd)) || (pcicmd & 1) == 0 || (pcicmd & 4) == 0)
- goto abort;
- if ((rc = pcibios_read_config_dword(bus, fn, 0x20, &bmiba)))
- goto abort;
- bmiba &= 0xfff0; /* extract port base address */
- if (bmiba != dma || !bmiba)
- goto abort;
- hwif0->chipset = ide_promise_udma;
- hwif1->chipset = ide_promise_udma;
- init_triton_dma(hwif0, bmiba);
- init_triton_dma(hwif1, bmiba + 0x08);
- return;
-abort:
- printk(KERN_WARNING "ide: Promise/33 not configured correctly (BIOS)\n");
+ quit: if (rc) printk("ide: pcibios access failed - %s\n", pcibios_strerror(rc));
}
diff --git a/linux/src/drivers/net/3c503.c b/linux/src/drivers/net/3c503.c
index a562db35..8ce488d8 100644
--- a/linux/src/drivers/net/3c503.c
+++ b/linux/src/drivers/net/3c503.c
@@ -192,7 +192,7 @@ el2_probe1(struct device *dev, int ioaddr)
}
if (ei_debug && version_printed++ == 0)
- printk(version);
+ printk("%s", version);
dev->base_addr = ioaddr;
/* Allocate dev->priv and fill in 8390 specific dev fields. */
diff --git a/linux/src/drivers/net/3c505.c b/linux/src/drivers/net/3c505.c
index b2163ecc..d78dad5b 100644
--- a/linux/src/drivers/net/3c505.c
+++ b/linux/src/drivers/net/3c505.c
@@ -1408,7 +1408,7 @@ static int elp_sense(struct device *dev)
/* Wait for a while; the adapter may still be booting up */
if (elp_debug > 0)
- printk(stilllooking_msg);
+ printk("%s", stilllooking_msg);
if (orig_HCR & DIR) {
/* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */
outb_control(orig_HCR & ~DIR, addr);
@@ -1439,7 +1439,7 @@ static int elp_sense(struct device *dev)
* a hard reset. Also, do a hard reset if selected at the compile time.
*/
if (elp_debug > 0)
- printk(found_msg);
+ printk("%s", found_msg);
return 0;
}
diff --git a/linux/src/drivers/net/8390.c b/linux/src/drivers/net/8390.c
index 9864106b..747ccb04 100644
--- a/linux/src/drivers/net/8390.c
+++ b/linux/src/drivers/net/8390.c
@@ -703,16 +703,13 @@ static void set_multicast_list(struct device *dev)
int ethdev_init(struct device *dev)
{
if (ei_debug > 1)
- printk(version);
+ printk("%s", version);
if (dev->priv == NULL) {
- struct ei_device *ei_local;
-
dev->priv = kmalloc(sizeof(struct ei_device), GFP_KERNEL);
if (dev->priv == NULL)
return -ENOMEM;
memset(dev->priv, 0, sizeof(struct ei_device));
- ei_local = (struct ei_device *)dev->priv;
}
dev->hard_start_xmit = &ei_start_xmit;
diff --git a/linux/src/drivers/net/at1700.c b/linux/src/drivers/net/at1700.c
index 953c1889..9e42ab48 100644
--- a/linux/src/drivers/net/at1700.c
+++ b/linux/src/drivers/net/at1700.c
@@ -595,7 +595,9 @@ net_rx(struct device *dev)
/* The inverse routine to net_open(). */
static int net_close(struct device *dev)
{
+#if 0
struct net_local *lp = (struct net_local *)dev->priv;
+#endif
int ioaddr = dev->base_addr;
dev->tbusy = 1;
diff --git a/linux/src/drivers/net/de4x5.c b/linux/src/drivers/net/de4x5.c
index 114f6a74..a66f0564 100644
--- a/linux/src/drivers/net/de4x5.c
+++ b/linux/src/drivers/net/de4x5.c
@@ -1493,7 +1493,7 @@ de4x5_sw_reset(struct device *dev)
status = -EIO;
}
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
lp->tx_old = lp->tx_new;
return status;
@@ -1553,7 +1553,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
#endif
outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
dev->trans_start = jiffies;
if (TX_BUFFS_AVAIL) {
@@ -1708,7 +1708,7 @@ de4x5_rx(struct device *dev)
}
/* Change buffer ownership for this frame, back to the adapter */
- for (;lp->rx_old!=entry;lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
+ for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old+1)%lp->rxRingSize) {
lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
barrier();
}
@@ -1719,7 +1719,7 @@ de4x5_rx(struct device *dev)
/*
** Update entry information
*/
- lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
+ lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
}
return 0;
@@ -1768,7 +1768,7 @@ de4x5_tx(struct device *dev)
}
/* Update all the pointers */
- lp->tx_old = (++lp->tx_old) % lp->txRingSize;
+ lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
}
if (TX_BUFFS_AVAIL && dev->tbusy) { /* Any resources available? */
@@ -1838,7 +1838,7 @@ de4x5_rx_ovfc(struct device *dev)
for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
- lp->rx_new = (++lp->rx_new % lp->rxRingSize);
+ lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
}
outl(omr, DE4X5_OMR);
@@ -1963,7 +1963,7 @@ set_multicast_list(struct device *dev)
load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
SETUP_FRAME_LEN, NULL);
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
dev->trans_start = jiffies;
}
@@ -3576,7 +3576,7 @@ ping_media(struct device *dev, int msec)
lp->tmp = lp->tx_new; /* Remember the ring position */
load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), NULL);
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD);
}
@@ -5111,7 +5111,7 @@ mii_get_phy(struct device *dev)
lp->useMII = TRUE;
/* Search the MII address space for possible PHY devices */
- for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(++i)%DE4X5_MAX_MII) {
+ for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
lp->phy[lp->active].addr = i;
if (i==0) n++; /* Count cycles */
while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
@@ -5607,7 +5607,7 @@ de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
while (test_and_set_bit(0, (void *)&dev->tbusy) != 0);
load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
SETUP_FRAME_LEN, NULL);
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
dev->tbusy = 0; /* Unlock the TX ring */
diff --git a/linux/src/drivers/net/depca.c b/linux/src/drivers/net/depca.c
index 8cf6fc80..e1b03429 100644
--- a/linux/src/drivers/net/depca.c
+++ b/linux/src/drivers/net/depca.c
@@ -966,7 +966,7 @@ depca_rx(struct device *dev)
}
}
/* Change buffer ownership for this last frame, back to the adapter */
- for (; lp->rx_old!=entry; lp->rx_old=(++lp->rx_old)&lp->rxRingMask) {
+ for (; lp->rx_old!=entry; lp->rx_old=(lp->rx_old+1)&lp->rxRingMask) {
writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN,
&lp->rx_ring[lp->rx_old].base);
}
@@ -976,7 +976,7 @@ depca_rx(struct device *dev)
/*
** Update entry information
*/
- lp->rx_new = (++lp->rx_new) & lp->rxRingMask;
+ lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
}
return 0;
@@ -1017,7 +1017,7 @@ depca_tx(struct device *dev)
}
/* Update all the pointers */
- lp->tx_old = (++lp->tx_old) & lp->txRingMask;
+ lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
}
return 0;
@@ -1540,7 +1540,7 @@ static int load_packet(struct device *dev, struct sk_buff *skb)
/* set up the buffer descriptors */
len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
- for (i = entry; i != end; i = (++i) & lp->txRingMask) {
+ for (i = entry; i != end; i = (i + 1) & lp->txRingMask) {
/* clean out flags */
writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base);
writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */
diff --git a/linux/src/drivers/net/pci-scan.c b/linux/src/drivers/net/pci-scan.c
index 6187d5db..60525b76 100644
--- a/linux/src/drivers/net/pci-scan.c
+++ b/linux/src/drivers/net/pci-scan.c
@@ -347,7 +347,7 @@ int pci_drv_register(struct drv_id_info *drv_id, void *initial_device)
pci_tbl[chip_idx].name, pciaddr, irq);
if ( ! (pci_flags & PCI_UNUSED_IRQ) &&
- (irq == 0 || irq == 255)) {
+ (irq == 0 || irq >= 16)) {
if (pci_bus == 32) /* Broken CardBus activation. */
printk(KERN_WARNING "Resources for CardBus device '%s' have"
" not been allocated.\n"
diff --git a/linux/src/drivers/scsi/FlashPoint.c b/linux/src/drivers/scsi/FlashPoint.c
index a74c3c5f..aae35c03 100644
--- a/linux/src/drivers/scsi/FlashPoint.c
+++ b/linux/src/drivers/scsi/FlashPoint.c
@@ -2326,7 +2326,6 @@ void Debug_Load(UCHAR p_card, UCHAR p_bug_data);
extern unsigned int SccbGlobalFlags;
-#ident "$Id: FlashPoint.c,v 1.1 1999/04/26 05:53:56 tb Exp $"
/*----------------------------------------------------------------------
*
*
@@ -5352,7 +5351,7 @@ void Debug_Load(UCHAR p_card, UCHAR p_bug_data)
}
#endif
-#ident "$Id: FlashPoint.c,v 1.1 1999/04/26 05:53:56 tb Exp $"
+
/*----------------------------------------------------------------------
*
*
@@ -5424,7 +5423,7 @@ UCHAR debug_int[MAX_CARDS][debug_size] = { 0 };
UCHAR debug_index[MAX_CARDS] = { 0 };
UCHAR reserved_1[3] = { 0 };
#endif
-#ident "$Id: FlashPoint.c,v 1.1 1999/04/26 05:53:56 tb Exp $"
+
/*----------------------------------------------------------------------
*
*
@@ -7518,7 +7517,6 @@ void sinits(PSCCB p_sccb, UCHAR p_card)
}
-#ident "$Id: FlashPoint.c,v 1.1 1999/04/26 05:53:56 tb Exp $"
/*----------------------------------------------------------------------
*
*
@@ -8267,7 +8265,6 @@ void phaseBusFree(ULONG port, UCHAR p_card)
-#ident "$Id: FlashPoint.c,v 1.1 1999/04/26 05:53:56 tb Exp $"
/*----------------------------------------------------------------------
*
*
@@ -8659,7 +8656,7 @@ void autoCmdCmplt(ULONG p_port, UCHAR p_card)
queueCmdComplete(&BL_Card[p_card], currSCCB, p_card);
}
-#ident "$Id: FlashPoint.c,v 1.1 1999/04/26 05:53:56 tb Exp $"
+
/*----------------------------------------------------------------------
*
*
@@ -9351,7 +9348,7 @@ void hostDataXferRestart(PSCCB currSCCB)
currSCCB->Sccb_XferCnt = currSCCB->DataLength - currSCCB->Sccb_ATC;
}
}
-#ident "$Id: FlashPoint.c,v 1.1 1999/04/26 05:53:56 tb Exp $"
+
/*----------------------------------------------------------------------
*
*
@@ -10581,7 +10578,7 @@ void scsavdi(UCHAR p_card, ULONG p_port)
utilEEWrite(p_port, sum_data, EEPROM_CHECK_SUM/2);
utilEEWriteOnOff(p_port,0); /* Turn off write access */
}
-#ident "$Id: FlashPoint.c,v 1.1 1999/04/26 05:53:56 tb Exp $"
+
/*----------------------------------------------------------------------
*
*
@@ -11026,7 +11023,7 @@ void DiagEEPROM(ULONG p_port)
}
-#ident "$Id: FlashPoint.c,v 1.1 1999/04/26 05:53:56 tb Exp $"
+
/*----------------------------------------------------------------------
*
*
diff --git a/linux/src/drivers/scsi/advansys.c b/linux/src/drivers/scsi/advansys.c
index 05cd0f6e..ef61fac9 100644
--- a/linux/src/drivers/scsi/advansys.c
+++ b/linux/src/drivers/scsi/advansys.c
@@ -2033,8 +2033,8 @@ STATIC void AscSetISAPNPWaitForKey(void);
STATIC uchar AscGetChipIRQ(PortAddr, ushort);
STATIC uchar AscSetChipIRQ(PortAddr, uchar, ushort);
STATIC ushort AscGetChipBiosAddress(PortAddr, ushort);
-STATIC int DvcEnterCritical(void);
-STATIC void DvcLeaveCritical(int);
+STATIC long DvcEnterCritical(void);
+STATIC void DvcLeaveCritical(long);
STATIC void DvcInPortWords(PortAddr, ushort *, int);
STATIC void DvcOutPortWords(PortAddr, ushort *, int);
STATIC void DvcOutPortDWords(PortAddr, ulong *, int);
@@ -2870,8 +2870,8 @@ typedef struct adv_scsi_req_q {
/*
* Device drivers must define the following functions.
*/
-STATIC int DvcEnterCritical(void);
-STATIC void DvcLeaveCritical(int);
+STATIC long DvcEnterCritical(void);
+STATIC void DvcLeaveCritical(long);
STATIC void DvcSleepMilliSecond(ulong);
STATIC uchar DvcAdvReadPCIConfigByte(ADV_DVC_VAR *, ushort);
STATIC void DvcAdvWritePCIConfigByte(ADV_DVC_VAR *, ushort, uchar);
@@ -5400,7 +5400,7 @@ advansys_queuecommand(Scsi_Cmnd *scp, void (*done)(Scsi_Cmnd *))
{
struct Scsi_Host *shp;
asc_board_t *boardp;
- int flags;
+ long flags;
Scsi_Cmnd *done_scp;
shp = scp->host;
@@ -5495,7 +5495,7 @@ advansys_abort(Scsi_Cmnd *scp)
asc_board_t *boardp;
ASC_DVC_VAR *asc_dvc_varp;
ADV_DVC_VAR *adv_dvc_varp;
- int flags;
+ long flags;
int do_scsi_done;
int scp_found;
Scsi_Cmnd *done_scp = NULL;
@@ -5734,7 +5734,7 @@ advansys_reset(Scsi_Cmnd *scp, unsigned int reset_flags)
asc_board_t *boardp;
ASC_DVC_VAR *asc_dvc_varp;
ADV_DVC_VAR *adv_dvc_varp;
- int flags;
+ long flags;
Scsi_Cmnd *done_scp = NULL, *last_scp = NULL;
Scsi_Cmnd *tscp, *new_last_scp;
int do_scsi_done;
@@ -6269,7 +6269,7 @@ advansys_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#endif /* version >= v1.3.70 */
{
#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,95)
- int flags;
+ long flags;
#else /* version >= v2.1.95 */
unsigned long flags;
#endif /* version >= v2.1.95 */
@@ -9194,7 +9194,7 @@ asc_prt_line(char *buf, int buflen, char *fmt, ...)
ret = vsprintf(s, fmt, args);
ASC_ASSERT(ret < ASC_PRTLINE_SIZE);
if (buf == NULL) {
- (void) printk(s);
+ (void) printk("%s", s);
ret = 0;
} else {
ret = ASC_MIN(buflen, ret);
@@ -9227,10 +9227,10 @@ DvcSleepMilliSecond(ulong n)
}
}
-STATIC int
+STATIC long
DvcEnterCritical(void)
{
- int flags;
+ long flags;
save_flags(flags);
cli();
@@ -9238,7 +9238,7 @@ DvcEnterCritical(void)
}
STATIC void
-DvcLeaveCritical(int flags)
+DvcLeaveCritical(long flags)
{
restore_flags(flags);
}
@@ -10198,7 +10198,7 @@ asc_prt_hex(char *f, uchar *s, int l)
STATIC int
interrupts_enabled(void)
{
- int flags;
+ long flags;
save_flags(flags);
if (flags & 0x0200) {
@@ -15078,7 +15078,7 @@ AdvISR(ADV_DVC_VAR *asc_dvc)
uchar int_stat;
ushort next_done_loc, target_bit;
int completed_q;
- int flags;
+ long flags;
ADV_SCSI_REQ_Q *scsiq;
ASC_REQ_SENSE *sense_data;
int ret;
diff --git a/linux/src/drivers/scsi/aha152x.c b/linux/src/drivers/scsi/aha152x.c
index a5724bf8..44fe1b0c 100644
--- a/linux/src/drivers/scsi/aha152x.c
+++ b/linux/src/drivers/scsi/aha152x.c
@@ -1539,7 +1539,7 @@ void aha152x_done(struct Scsi_Host *shpnt, int error)
void aha152x_intr(int irqno, void *dev_id, struct pt_regs * regs)
{
struct Scsi_Host *shpnt = aha152x_host[irqno-IRQ_MIN];
- unsigned int flags;
+ unsigned long flags;
int done=0, phase;
#if defined(DEBUG_RACE)
diff --git a/linux/src/drivers/scsi/aha1542.c b/linux/src/drivers/scsi/aha1542.c
index 68954d75..cc27e5c0 100644
--- a/linux/src/drivers/scsi/aha1542.c
+++ b/linux/src/drivers/scsi/aha1542.c
@@ -356,7 +356,7 @@ static void aha1542_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
void (*my_done)(Scsi_Cmnd *) = NULL;
int errstatus, mbi, mbo, mbistatus;
int number_serviced;
- unsigned int flags;
+ unsigned long flags;
struct Scsi_Host * shost;
Scsi_Cmnd * SCtmp;
int flag;
@@ -875,7 +875,7 @@ void aha1542_setup( char *str, int *ints)
if (ints[0] < 1 || ints[0] > 4)
{
printk("aha1542: %s\n", str );
- printk(ahausage);
+ printk("%s", ahausage);
printk("aha1542: Wrong parameters may cause system malfunction.. We try anyway..\n");
}
@@ -905,7 +905,7 @@ void aha1542_setup( char *str, int *ints)
break;
default:
printk("aha1542: %s\n", str );
- printk(ahausage);
+ printk("%s", ahausage);
printk("aha1542: Valid values for DMASPEED are 5-8, 10 MB/s. Using jumper defaults.\n");
break;
}
diff --git a/linux/src/drivers/scsi/aic7xxx.c b/linux/src/drivers/scsi/aic7xxx.c
index be817bb7..93bed411 100644
--- a/linux/src/drivers/scsi/aic7xxx.c
+++ b/linux/src/drivers/scsi/aic7xxx.c
@@ -2800,7 +2800,7 @@ aic7xxx_done_cmds_complete(struct aic7xxx_host *p)
{
Scsi_Cmnd *cmd;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
- unsigned int cpu_flags = 0;
+ unsigned long cpu_flags = 0;
#endif
DRIVER_LOCK
diff --git a/linux/src/drivers/scsi/eata.c b/linux/src/drivers/scsi/eata.c
index ce859ced..49f08275 100644
--- a/linux/src/drivers/scsi/eata.c
+++ b/linux/src/drivers/scsi/eata.c
@@ -881,7 +881,7 @@ __initfunc (static inline int port_detect \
sprintf(name, "%s%d", driver_name, j);
- printk("probing eata on %lx\n", port_base);
+ printk("\rprobing eata on %lx", port_base);
if(check_region(port_base, REGION_SIZE)) {
printk("%s: address 0x%03lx in use, skipping probe.\n", name, port_base);
diff --git a/linux/src/drivers/scsi/gdth.c b/linux/src/drivers/scsi/gdth.c
index 6cc2b780..0a4bef89 100644
--- a/linux/src/drivers/scsi/gdth.c
+++ b/linux/src/drivers/scsi/gdth.c
@@ -2851,8 +2851,9 @@ __initfunc (int gdth_detect(Scsi_Host_Template *shtp))
ushort eisa_slot,device_id,index;
gdth_pci_str pcistr;
int i,j,hanum;
+#if LINUX_VERSION_CODE < 0x020000
unchar b;
-
+#endif
#ifdef DEBUG_GDTH
printk("GDT: This driver contains debugging information !! Trace level = %d\n",
@@ -2878,7 +2879,7 @@ __initfunc (int gdth_detect(Scsi_Host_Template *shtp))
}
/* initializations */
- gdth_polling = TRUE; b = 0;
+ gdth_polling = TRUE;
for (i=0; i<GDTH_MAXCMDS; ++i)
for (j=0; j<MAXHA; ++j)
gdth_cmd_tab[i][j].cmnd = UNUSED_CMND;
diff --git a/linux/src/drivers/scsi/hosts.c b/linux/src/drivers/scsi/hosts.c
index 07646041..0f1beddd 100644
--- a/linux/src/drivers/scsi/hosts.c
+++ b/linux/src/drivers/scsi/hosts.c
@@ -133,6 +133,10 @@
#include "53c7,8xx.h"
#endif
+#ifdef CONFIG_SCSI_SYM53C8XX
+#include "sym53c8xx.h"
+#endif
+
#ifdef CONFIG_SCSI_NCR53C8XX
#include "ncr53c8xx.h"
#endif
@@ -298,6 +302,9 @@ static Scsi_Host_Template builtin_scsi_hosts[] =
#ifdef CONFIG_SCSI_NCR53C7xx
NCR53c7xx,
#endif
+#ifdef CONFIG_SCSI_SYM53C8XX
+ SYM53C8XX,
+#endif
#ifdef CONFIG_SCSI_NCR53C8XX
NCR53C8XX,
#endif
@@ -465,6 +472,7 @@ unsigned int scsi_init()
* Initialize our semaphores. -1 is interpreted to mean
* "inactive" - where as 0 will indicate a time out condition.
*/
+ printk("\rprobing scsi %d/%d: %s \e[K", tpnt-builtin_scsi_hosts, MAX_SCSI_HOSTS, tpnt->name);
pcount = next_scsi_host;
if ((tpnt->detect) &&
@@ -489,6 +497,7 @@ unsigned int scsi_init()
#endif
}
}
+ printk("\ndone\n");
for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
{
diff --git a/linux/src/drivers/scsi/ncr53c8xx.c b/linux/src/drivers/scsi/ncr53c8xx.c
index 1be3d9fe..0a584297 100644
--- a/linux/src/drivers/scsi/ncr53c8xx.c
+++ b/linux/src/drivers/scsi/ncr53c8xx.c
@@ -272,8 +272,10 @@ typedef u32 u_int32;
#define u_int unsigned int
#define u_long unsigned long
+#ifndef MACH
typedef u_long vm_offset_t;
typedef int vm_size_t;
+#endif
#define bcopy(s, d, n) memcpy((d), (s), (n))
#define bzero(d, n) memset((d), 0, (n))
diff --git a/linux/src/drivers/scsi/ncr53c8xx.h b/linux/src/drivers/scsi/ncr53c8xx.h
index cc009ca6..03424387 100644
--- a/linux/src/drivers/scsi/ncr53c8xx.h
+++ b/linux/src/drivers/scsi/ncr53c8xx.h
@@ -81,7 +81,7 @@
#endif
#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
-# define SCSI_NCR_PROC_INFO_SUPPORT
+//# define SCSI_NCR_PROC_INFO_SUPPORT
#endif
#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,72)
diff --git a/linux/src/drivers/scsi/ppa.c b/linux/src/drivers/scsi/ppa.c
index 5063a677..fd224f9e 100644
--- a/linux/src/drivers/scsi/ppa.c
+++ b/linux/src/drivers/scsi/ppa.c
@@ -259,7 +259,6 @@ int ppa_detect(Scsi_Host_Template * host)
struct Scsi_Host *hreg;
int ports;
int i, nhosts;
- unsigned short ppb;
printk("ppa: Version %s\n", PPA_VERSION);
nhosts = 0;
@@ -267,7 +266,7 @@ int ppa_detect(Scsi_Host_Template * host)
for (i = 0; i < parbus_no; i++) {
if (parbus_base[i] == 0x0000)
continue;
- ppb = ppa_hosts[i].base = parbus_base[i];
+ ppa_hosts[i].base = parbus_base[i];
/* sanity checks */
if (check_region(parbus_base[i],
@@ -518,7 +517,7 @@ static inline int ppa_byte_in(unsigned short base, char *buffer, int len)
" shrb $4,%%al\n" \
" orb %%al," #reg "\n"
-static inline int ppa_nibble_in(unsigned short str_p, char *buffer, int len)
+static inline int ppa_nibble_in(unsigned short base, char *buffer, int len)
{
for (; len; len--) {
unsigned char h;
diff --git a/linux/src/drivers/scsi/sym53c8xx.c b/linux/src/drivers/scsi/sym53c8xx.c
new file mode 100644
index 00000000..f4969549
--- /dev/null
+++ b/linux/src/drivers/scsi/sym53c8xx.c
@@ -0,0 +1,14696 @@
+/******************************************************************************
+** High Performance device driver for the Symbios 53C896 controller.
+**
+** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+**
+** This driver also supports all the Symbios 53C8XX controller family,
+** except 53C810 revisions < 16, 53C825 revisions < 16 and all
+** revisions of 53C815 controllers.
+**
+** This driver is based on the Linux port of the FreeBSD ncr driver.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** The Linux port of the FreeBSD ncr driver has been achieved in
+** november 1995 by:
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+**-----------------------------------------------------------------------------
+**
+** Major contributions:
+** --------------------
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+*******************************************************************************
+*/
+
+/*
+** Supported SCSI features:
+** Synchronous data transfers
+** Wide16 SCSI BUS
+** Disconnection/Reselection
+** Tagged command queuing
+** SCSI Parity checking
+**
+** Supported NCR/SYMBIOS chips:
+** 53C810A (8 bits, Fast 10, no rom BIOS)
+** 53C825A (Wide, Fast 10, on-board rom BIOS)
+** 53C860 (8 bits, Fast 20, no rom BIOS)
+** 53C875 (Wide, Fast 20, on-board rom BIOS)
+** 53C876 (Wide, Fast 20 Dual, on-board rom BIOS)
+** 53C895 (Wide, Fast 40, on-board rom BIOS)
+** 53C895A (Wide, Fast 40, on-board rom BIOS)
+** 53C896 (Wide, Fast 40 Dual, on-board rom BIOS)
+** 53C897 (Wide, Fast 40 Dual, on-board rom BIOS)
+** 53C1510D (Wide, Fast 40 Dual, on-board rom BIOS)
+** 53C1010 (Wide, Fast 80 Dual, on-board rom BIOS)
+** 53C1010_66(Wide, Fast 80 Dual, on-board rom BIOS, 33/66MHz PCI)
+**
+** Other features:
+** Memory mapped IO
+** Module
+** Shared IRQ
+*/
+
+/*
+** Name and version of the driver
+*/
+#define SCSI_NCR_DRIVER_NAME "sym53c8xx-1.7.1-20000726"
+
+#define SCSI_NCR_DEBUG_FLAGS (0)
+
+#define NAME53C "sym53c"
+#define NAME53C8XX "sym53c8xx"
+
+/*==========================================================
+**
+** Include files
+**
+**==========================================================
+*/
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,17)
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+#include <asm/spinlock.h>
+#endif
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/stat.h>
+
+#include <linux/version.h>
+#include <linux/blk.h>
+
+#ifdef CONFIG_ALL_PPC
+#include <asm/prom.h>
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,35)
+#include <linux/init.h>
+#endif
+
+#ifndef __init
+#define __init
+#endif
+#ifndef __initdata
+#define __initdata
+#endif
+
+#if LINUX_VERSION_CODE <= LinuxVersionCode(2,1,92)
+#include <linux/bios32.h>
+#endif
+
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+#include "sd.h"
+
+#include <linux/types.h>
+
+/*
+** Define BITS_PER_LONG for earlier linux versions.
+*/
+#ifndef BITS_PER_LONG
+#if (~0UL) == 0xffffffffUL
+#define BITS_PER_LONG 32
+#else
+#define BITS_PER_LONG 64
+#endif
+#endif
+
+/*
+** Define the BSD style u_int32 and u_int64 type.
+** Are in fact u_int32_t and u_int64_t :-)
+*/
+typedef u32 u_int32;
+typedef u64 u_int64;
+
+#include "sym53c8xx.h"
+
+/*
+** Donnot compile integrity checking code for Linux-2.3.0
+** and above since SCSI data structures are not ready yet.
+*/
+/* #if LINUX_VERSION_CODE < LinuxVersionCode(2,3,0) */
+#if 0
+#define SCSI_NCR_INTEGRITY_CHECKING
+#endif
+
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+
+/*
+** Hmmm... What complex some PCI-HOST bridges actually are,
+** despite the fact that the PCI specifications are looking
+** so smart and simple! ;-)
+*/
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,47)
+#define SCSI_NCR_DYNAMIC_DMA_MAPPING
+#endif
+
+/*==========================================================
+**
+** A la VMS/CAM-3 queue management.
+** Implemented from linux list management.
+**
+**==========================================================
+*/
+
+typedef struct xpt_quehead {
+ struct xpt_quehead *flink; /* Forward pointer */
+ struct xpt_quehead *blink; /* Backward pointer */
+} XPT_QUEHEAD;
+
+#define xpt_que_init(ptr) do { \
+ (ptr)->flink = (ptr); (ptr)->blink = (ptr); \
+} while (0)
+
+static inline void __xpt_que_add(struct xpt_quehead * new,
+ struct xpt_quehead * blink,
+ struct xpt_quehead * flink)
+{
+ flink->blink = new;
+ new->flink = flink;
+ new->blink = blink;
+ blink->flink = new;
+}
+
+static inline void __xpt_que_del(struct xpt_quehead * blink,
+ struct xpt_quehead * flink)
+{
+ flink->blink = blink;
+ blink->flink = flink;
+}
+
+static inline int xpt_que_empty(struct xpt_quehead *head)
+{
+ return head->flink == head;
+}
+
+static inline void xpt_que_splice(struct xpt_quehead *list,
+ struct xpt_quehead *head)
+{
+ struct xpt_quehead *first = list->flink;
+
+ if (first != list) {
+ struct xpt_quehead *last = list->blink;
+ struct xpt_quehead *at = head->flink;
+
+ first->blink = head;
+ head->flink = first;
+
+ last->flink = at;
+ at->blink = last;
+ }
+}
+
+#define xpt_que_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+
+
+#define xpt_insque(new, pos) __xpt_que_add(new, pos, (pos)->flink)
+
+#define xpt_remque(el) __xpt_que_del((el)->blink, (el)->flink)
+
+#define xpt_insque_head(new, head) __xpt_que_add(new, head, (head)->flink)
+
+static inline struct xpt_quehead *xpt_remque_head(struct xpt_quehead *head)
+{
+ struct xpt_quehead *elem = head->flink;
+
+ if (elem != head)
+ __xpt_que_del(head, elem->flink);
+ else
+ elem = 0;
+ return elem;
+}
+
+#define xpt_insque_tail(new, head) __xpt_que_add(new, (head)->blink, head)
+
+static inline struct xpt_quehead *xpt_remque_tail(struct xpt_quehead *head)
+{
+ struct xpt_quehead *elem = head->blink;
+
+ if (elem != head)
+ __xpt_que_del(elem->blink, head);
+ else
+ elem = 0;
+ return elem;
+}
+
+/*==========================================================
+**
+** Configuration and Debugging
+**
+**==========================================================
+*/
+
+/*
+** SCSI address of this device.
+** The boot routines should have set it.
+** If not, use this.
+*/
+
+#ifndef SCSI_NCR_MYADDR
+#define SCSI_NCR_MYADDR (7)
+#endif
+
+/*
+** The maximum number of tags per logic unit.
+** Used only for devices that support tags.
+*/
+
+#ifndef SCSI_NCR_MAX_TAGS
+#define SCSI_NCR_MAX_TAGS (8)
+#endif
+
+/*
+** TAGS are actually unlimited (256 tags/lun).
+** But Linux only supports 255. :)
+*/
+#if SCSI_NCR_MAX_TAGS > 255
+#define MAX_TAGS 255
+#else
+#define MAX_TAGS SCSI_NCR_MAX_TAGS
+#endif
+
+/*
+** Since the ncr chips only have a 8 bit ALU, we try to be clever
+** about offset calculation in the TASK TABLE per LUN that is an
+** array of DWORDS = 4 bytes.
+*/
+#if MAX_TAGS > (512/4)
+#define MAX_TASKS (1024/4)
+#elif MAX_TAGS > (256/4)
+#define MAX_TASKS (512/4)
+#else
+#define MAX_TASKS (256/4)
+#endif
+
+/*
+** This one means 'NO TAG for this job'
+*/
+#define NO_TAG (256)
+
+/*
+** Number of targets supported by the driver.
+** n permits target numbers 0..n-1.
+** Default is 16, meaning targets #0..#15.
+** #7 .. is myself.
+*/
+
+#ifdef SCSI_NCR_MAX_TARGET
+#define MAX_TARGET (SCSI_NCR_MAX_TARGET)
+#else
+#define MAX_TARGET (16)
+#endif
+
+/*
+** Number of logic units supported by the driver.
+** n enables logic unit numbers 0..n-1.
+** The common SCSI devices require only
+** one lun, so take 1 as the default.
+*/
+
+#ifdef SCSI_NCR_MAX_LUN
+#define MAX_LUN 64
+#else
+#define MAX_LUN (1)
+#endif
+
+/*
+** Asynchronous pre-scaler (ns). Shall be 40 for
+** the SCSI timings to be compliant.
+*/
+
+#ifndef SCSI_NCR_MIN_ASYNC
+#define SCSI_NCR_MIN_ASYNC (40)
+#endif
+
+/*
+** The maximum number of jobs scheduled for starting.
+** We allocate 4 entries more than the value we announce
+** to the SCSI upper layer. Guess why ! :-)
+*/
+
+#ifdef SCSI_NCR_CAN_QUEUE
+#define MAX_START (SCSI_NCR_CAN_QUEUE + 4)
+#else
+#define MAX_START (MAX_TARGET + 7 * MAX_TAGS)
+#endif
+
+/*
+** We donnot want to allocate more than 1 PAGE for the
+** the start queue and the done queue. We hard-code entry
+** size to 8 in order to let cpp do the checking.
+** Allows 512-4=508 pending IOs for i386 but Linux seems for
+** now not able to provide the driver with this amount of IOs.
+*/
+#if MAX_START > PAGE_SIZE/8
+#undef MAX_START
+#define MAX_START (PAGE_SIZE/8)
+#endif
+
+/*
+** The maximum number of segments a transfer is split into.
+** We support up to 127 segments for both read and write.
+*/
+
+#define MAX_SCATTER (SCSI_NCR_MAX_SCATTER)
+#define SCR_SG_SIZE (2)
+
+/*
+** other
+*/
+
+#define NCR_SNOOP_TIMEOUT (1000000)
+
+/*==========================================================
+**
+** Miscallaneous BSDish defines.
+**
+**==========================================================
+*/
+
+#define u_char unsigned char
+#define u_short unsigned short
+#define u_int unsigned int
+#define u_long unsigned long
+
+#ifndef bcopy
+#define bcopy(s, d, n) memcpy((d), (s), (n))
+#endif
+
+#ifndef bzero
+#define bzero(d, n) memset((d), 0, (n))
+#endif
+
+#ifndef offsetof
+#define offsetof(t, m) ((size_t) (&((t *)0)->m))
+#endif
+
+/*
+** Simple Wrapper to kernel PCI bus interface.
+**
+** This wrapper allows to get rid of old kernel PCI interface
+** and still allows to preserve linux-2.0 compatibilty.
+** In fact, it is mostly an incomplete emulation of the new
+** PCI code for pre-2.2 kernels. When kernel-2.0 support
+** will be dropped, we will just have to remove most of this
+** code.
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0)
+
+typedef struct pci_dev *pcidev_t;
+#define PCIDEV_NULL (0)
+#define PciBusNumber(d) (d)->bus->number
+#define PciDeviceFn(d) (d)->devfn
+#define PciVendorId(d) (d)->vendor
+#define PciDeviceId(d) (d)->device
+#define PciIrqLine(d) (d)->irq
+
+#if LINUX_VERSION_CODE > LinuxVersionCode(2,3,12)
+
+static int __init
+pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+{
+ *base = pdev->resource[index].start;
+ if ((pdev->resource[index].flags & 0x7) == 0x4)
+ ++index;
+ return ++index;
+}
+#else
+static int __init
+pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+{
+ *base = pdev->base_address[index++];
+ if ((*base & 0x7) == 0x4) {
+#if BITS_PER_LONG > 32
+ *base |= (((u_long)pdev->base_address[index]) << 32);
+#endif
+ ++index;
+ }
+ return index;
+}
+#endif
+
+#else /* Incomplete emulation of current PCI code for pre-2.2 kernels */
+
+typedef unsigned int pcidev_t;
+#define PCIDEV_NULL (~0u)
+#define PciBusNumber(d) ((d)>>8)
+#define PciDeviceFn(d) ((d)&0xff)
+#define __PciDev(busn, devfn) (((busn)<<8)+(devfn))
+
+#define pci_present pcibios_present
+
+#define pci_read_config_byte(d, w, v) \
+ pcibios_read_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_read_config_word(d, w, v) \
+ pcibios_read_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_read_config_dword(d, w, v) \
+ pcibios_read_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
+
+#define pci_write_config_byte(d, w, v) \
+ pcibios_write_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_write_config_word(d, w, v) \
+ pcibios_write_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_write_config_dword(d, w, v) \
+ pcibios_write_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
+
+/*
+**	Emulation of pci_find_device() on top of the old pcibios interface.
+**	Iterates matching (vendor, device) boards: pass PCIDEV_NULL to start
+**	a fresh scan, or the previous return value to get the next board.
+**	NOTE(review): the iteration state lives in a static variable, so
+**	only one scan may be in progress at a time (init-time use only).
+*/
+static pcidev_t __init
+pci_find_device(unsigned int vendor, unsigned int device, pcidev_t prev)
+{
+	static unsigned short pci_index;
+	int retv;
+	unsigned char bus_number, device_fn;
+
+	if (prev == PCIDEV_NULL)
+		pci_index = 0;
+	else
+		++pci_index;
+	retv = pcibios_find_device (vendor, device, pci_index,
+				    &bus_number, &device_fn);
+	return retv ? PCIDEV_NULL : __PciDev(bus_number, device_fn);
+}
+
+/* Read the 16-bit vendor id from PCI config space (pre-2.2 emulation). */
+static u_short __init PciVendorId(pcidev_t dev)
+{
+	u_short vendor_id;
+	pci_read_config_word(dev, PCI_VENDOR_ID, &vendor_id);
+	return vendor_id;
+}
+
+/* Read the 16-bit device id from PCI config space (pre-2.2 emulation). */
+static u_short __init PciDeviceId(pcidev_t dev)
+{
+	u_short device_id;
+	pci_read_config_word(dev, PCI_DEVICE_ID, &device_id);
+	return device_id;
+}
+
+/* Read the interrupt line from PCI config space (pre-2.2 emulation). */
+static u_int __init PciIrqLine(pcidev_t dev)
+{
+	u_char irq;
+	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+	return irq;
+}
+
+/*
+**	Pre-2.2 variant: read a BAR through pcibios config accesses.
+**	'offset' is a byte offset from PCI_BASE_ADDRESS_0; the returned
+**	value is the byte offset of the next BAR.  A 64-bit memory BAR
+**	(low type bits == 0x4) consumes two dwords; its high half is only
+**	merged into *base when u_long is wider than 32 bits.
+*/
+static int __init
+pci_get_base_address(pcidev_t dev, int offset, u_long *base)
+{
+	u_int32 tmp;
+
+	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
+	*base = tmp;
+	offset += sizeof(u_int32);
+	if ((tmp & 0x7) == 0x4) {
+#if BITS_PER_LONG > 32
+		pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
+		*base |= (((u_long)tmp) << 32);
+#endif
+		offset += sizeof(u_int32);
+	}
+	return offset;
+}
+
+#endif /* LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0) */
+
+/*==========================================================
+**
+** Debugging tags
+**
+**==========================================================
+*/
+
+#define DEBUG_ALLOC (0x0001)
+#define DEBUG_PHASE (0x0002)
+#define DEBUG_QUEUE (0x0008)
+#define DEBUG_RESULT (0x0010)
+#define DEBUG_POINTER (0x0020)
+#define DEBUG_SCRIPT (0x0040)
+#define DEBUG_TINY (0x0080)
+#define DEBUG_TIMING (0x0100)
+#define DEBUG_NEGO (0x0200)
+#define DEBUG_TAGS (0x0400)
+#define DEBUG_IC (0x0800)
+
+/*
+** Enable/Disable debug messages.
+** Can be changed at runtime too.
+*/
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
+ #define DEBUG_FLAGS ncr_debug
+#else
+ #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
+#endif
+
+/*
+** SMP threading.
+**
+** Assuming that SMP systems are generally high end systems and may
+** use several SCSI adapters, we are using one lock per controller
+** instead of some global one. For the moment (linux-2.1.95), driver's
+** entry points are called with the 'io_request_lock' lock held, so:
+**  -  We are uselessly losing a couple of micro-seconds to lock the
+** controller data structure.
+** - But the driver is not broken by design for SMP and so can be
+** more resistant to bugs or bad changes in the IO sub-system code.
+** - A small advantage could be that the interrupt code is grained as
+** wished (e.g.: threaded by controller).
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+
+spinlock_t sym53c8xx_lock = SPIN_LOCK_UNLOCKED;
+#define NCR_LOCK_DRIVER(flags) spin_lock_irqsave(&sym53c8xx_lock, flags)
+#define NCR_UNLOCK_DRIVER(flags) spin_unlock_irqrestore(&sym53c8xx_lock,flags)
+
+#define NCR_INIT_LOCK_NCB(np) spin_lock_init(&np->smp_lock);
+#define NCR_LOCK_NCB(np, flags) spin_lock_irqsave(&np->smp_lock, flags)
+#define NCR_UNLOCK_NCB(np, flags) spin_unlock_irqrestore(&np->smp_lock, flags)
+
+#define NCR_LOCK_SCSI_DONE(np, flags) \
+ spin_lock_irqsave(&io_request_lock, flags)
+#define NCR_UNLOCK_SCSI_DONE(np, flags) \
+ spin_unlock_irqrestore(&io_request_lock, flags)
+
+#else
+
+#define NCR_LOCK_DRIVER(flags) do { save_flags(flags); cli(); } while (0)
+#define NCR_UNLOCK_DRIVER(flags) do { restore_flags(flags); } while (0)
+
+#define NCR_INIT_LOCK_NCB(np) do { } while (0)
+#define NCR_LOCK_NCB(np, flags) do { save_flags(flags); cli(); } while (0)
+#define NCR_UNLOCK_NCB(np, flags) do { restore_flags(flags); } while (0)
+
+#define NCR_LOCK_SCSI_DONE(np, flags) do {;} while (0)
+#define NCR_UNLOCK_SCSI_DONE(np, flags) do {;} while (0)
+
+#endif
+
+/*
+** Memory mapped IO
+**
+** Since linux-2.1, we must use ioremap() to map the io memory space.
+** iounmap() to unmap it. That allows portability.
+** Linux 1.3.X and 2.0.X allow remapping of physical page addresses greater
+** than the highest physical memory address to kernel virtual pages with
+** vremap() / vfree(). That was not portable but worked with i386
+** architecture.
+*/
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+#define ioremap vremap
+#define iounmap vfree
+#endif
+
+#ifdef __sparc__
+# include <asm/irq.h>
+# if LINUX_VERSION_CODE < LinuxVersionCode(2,3,0)
+ /* ioremap/iounmap broken in 2.2.x on Sparc. -DaveM */
+# define ioremap(base, size) ((u_long) __va(base))
+# define iounmap(vaddr)
+# endif
+# define pcivtobus(p) bus_dvma_to_mem(p)
+# define memcpy_to_pci(a, b, c) memcpy_toio((void *)(a), (const void *)(b), (c))
+#elif defined(__alpha__)
+# define pcivtobus(p) ((p) & 0xfffffffful)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#else /* others */
+# define pcivtobus(p) (p)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#endif
+
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+/*
+**	Map 'size' bytes of PCI memory space at physical address 'base'
+**	into kernel virtual space.  The mapping is done on the enclosing
+**	page boundary and the intra-page offset is added back, so the
+**	returned address points exactly at 'base'.  Returns 0 on failure.
+*/
+static u_long __init remap_pci_mem(u_long base, u_long size)
+{
+	u_long page_base	= ((u_long) base) & PAGE_MASK;
+	u_long page_offs	= ((u_long) base) - page_base;
+	u_long page_remapped	= (u_long) ioremap(page_base, page_offs+size);
+
+	return page_remapped? (page_remapped + page_offs) : 0UL;
+}
+
+/*
+**	Undo remap_pci_mem(): mask the offset back off to recover the
+**	page-aligned address that ioremap() returned.  A zero vaddr
+**	(failed mapping) is ignored.
+*/
+static void __init unmap_pci_mem(u_long vaddr, u_long size)
+{
+	if (vaddr)
+		iounmap((void *) (vaddr & PAGE_MASK));
+}
+
+#endif /* not def SCSI_NCR_PCI_MEM_NOT_SUPPORTED */
+
+/*
+** Insert a delay in micro-seconds and milli-seconds.
+** -------------------------------------------------
+** Under Linux, udelay() is restricted to delay < 1 milli-second.
+** In fact, it generally works for up to 1 second delay.
+** Since 2.1.105, the mdelay() function is provided for delays
+** in milli-seconds.
+** Under 2.0 kernels, udelay() is an inline function that is very
+** inaccurate on Pentium processors.
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,105)
+#define UDELAY udelay
+#define MDELAY mdelay
+#else
+/* Pre-2.1.105 kernels lack mdelay(): emulate it with 1 ms udelay() steps. */
+static void UDELAY(long us) { udelay(us); }
+static void MDELAY(long ms) { while (ms--) UDELAY(1000); }
+#endif
+
+/*
+** Simple power of two buddy-like allocator
+** ----------------------------------------
+** This simple code is not intended to be fast, but to provide
+** power of 2 aligned memory allocations.
+** Since the SCRIPTS processor only supplies 8 bit arithmetic,
+** this allocator allows simple and fast address calculations
+** from the SCRIPTS code. In addition, cache line alignment
+** is guaranteed for power of 2 cache line size.
+** Enhanced in linux-2.3.44 to provide a memory pool per pcidev
+** to support dynamic dma mapping. (I would have preferred a
+** real bus abstraction, btw).
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+#define __GetFreePages(flags, order) __get_free_pages(flags, order)
+#else
+#define __GetFreePages(flags, order) __get_free_pages(flags, order, 0)
+#endif
+
+#define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
+#if PAGE_SIZE >= 8192
+#define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
+#else
+#define MEMO_PAGE_ORDER 1 /* 2 PAGES maximum */
+#endif
+#define MEMO_FREE_UNUSED /* Free unused pages immediately */
+#define MEMO_WARN 1
+#define MEMO_GFP_FLAGS GFP_ATOMIC
+#define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
+#define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
+#define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
+
+typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */
+typedef pcidev_t m_bush_t; /* Something that addresses DMAable */
+
+typedef struct m_link { /* Link between free memory chunks */
+ struct m_link *next;
+} m_link_s;
+
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+typedef struct m_vtob { /* Virtual to Bus address translation */
+ struct m_vtob *next;
+ m_addr_t vaddr;
+ m_addr_t baddr;
+} m_vtob_s;
+#define VTOB_HASH_SHIFT 5
+#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
+#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
+#define VTOB_HASH_CODE(m) \
+ ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
+#endif
+
+typedef struct m_pool { /* Memory pool of a given kind */
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+ m_bush_t bush;
+ m_addr_t (*getp)(struct m_pool *);
+ void (*freep)(struct m_pool *, m_addr_t);
+#define M_GETP() mp->getp(mp)
+#define M_FREEP(p) mp->freep(mp, p)
+#define GetPages() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
+#define FreePages(p) free_pages(p, MEMO_PAGE_ORDER)
+ int nump;
+ m_vtob_s *(vtob[VTOB_HASH_SIZE]);
+ struct m_pool *next;
+#else
+#define M_GETP() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
+#define M_FREEP(p) free_pages(p, MEMO_PAGE_ORDER)
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+ struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];
+} m_pool_s;
+
+/*
+**	Core of the buddy-like allocator: return a power-of-2 sized,
+**	power-of-2 aligned chunk of at least 'size' bytes from pool 'mp',
+**	or 0 on failure.  Free chunks of order i are kept in the singly
+**	linked list mp->h[i].  NOTE: M_GETP() expands using the local
+**	variable name 'mp' — do not rename it.
+*/
+static void *___m_alloc(m_pool_s *mp, int size)
+{
+	int i = 0;
+	int s = (1 << MEMO_SHIFT);
+	int j;
+	m_addr_t a;
+	m_link_s *h = mp->h;
+
+	/* Requests larger than one cluster cannot be satisfied */
+	if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+		return 0;
+
+	/* i/s: smallest order/size that fits the request */
+	while (size > s) {
+		s <<= 1;
+		++i;
+	}
+
+	/* Find the first non-empty free list at order >= i; when even the
+	** top order is empty, get a fresh cluster from the pool backend. */
+	j = i;
+	while (!h[j].next) {
+		if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+			h[j].next = (m_link_s *) M_GETP();
+			if (h[j].next)
+				h[j].next->next = 0;
+			break;
+		}
+		++j;
+		s <<= 1;
+	}
+	a = (m_addr_t) h[j].next;
+	if (a) {
+		h[j].next = h[j].next->next;
+		/* Split the chunk down to order i, pushing each upper
+		** buddy half onto the corresponding free list. */
+		while (j > i) {
+			j -= 1;
+			s >>= 1;
+			h[j].next = (m_link_s *) (a+s);
+			h[j].next->next = 0;
+		}
+	}
+#ifdef DEBUG
+	printk("___m_alloc(%d) = %p\n", size, (void *) a);
+#endif
+	return (void *) a;
+}
+
+/*
+**	Return a chunk obtained from ___m_alloc() to pool 'mp'.
+**	'size' must be the size passed at allocation time.  The chunk is
+**	coalesced with its buddy (address ^ size) as long as the buddy is
+**	free; a fully coalesced cluster is handed back to the pool backend
+**	when MEMO_FREE_UNUSED is defined.  NOTE: M_FREEP() expands using
+**	the local names 'mp' and is passed 'a' — do not rename them.
+*/
+static void ___m_free(m_pool_s *mp, void *ptr, int size)
+{
+	int i = 0;
+	int s = (1 << MEMO_SHIFT);
+	m_link_s *q;
+	m_addr_t a, b;
+	m_link_s *h = mp->h;
+
+#ifdef DEBUG
+	printk("___m_free(%p, %d)\n", ptr, size);
+#endif
+
+	/* Oversized chunks can never have come from this allocator */
+	if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+		return;
+
+	/* i/s: order/size of the chunk being freed */
+	while (size > s) {
+		s <<= 1;
+		++i;
+	}
+
+	a = (m_addr_t) ptr;
+
+	while (1) {
+#ifdef MEMO_FREE_UNUSED
+		/* Whole cluster reassembled: give it back immediately */
+		if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+			M_FREEP(a);
+			break;
+		}
+#endif
+		/* b: address of the buddy of chunk 'a' at this order */
+		b = a ^ s;
+		q = &h[i];
+		while (q->next && q->next != (m_link_s *) b) {
+			q = q->next;
+		}
+		if (!q->next) {
+			/* Buddy is busy: just push 'a' on the free list */
+			((m_link_s *) a)->next = h[i].next;
+			h[i].next = (m_link_s *) a;
+			break;
+		}
+		/* Buddy is free: unlink it and merge into the lower half */
+		q->next = q->next->next;
+		a = a & b;
+		s <<= 1;
+		++i;
+	}
+}
+
+/*
+**	Allocate and zero 'size' bytes from pool 'mp'.  'name' is only
+**	used for debug/warning messages.  When allocation fails and
+**	'uflags' contains MEMO_WARN, a warning is logged.  Returns the
+**	zeroed chunk or NULL.
+*/
+static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags)
+{
+	void *p;
+
+	p = ___m_alloc(mp, size);
+
+	if (DEBUG_FLAGS & DEBUG_ALLOC)
+		printk ("new %-10s[%4d] @%p.\n", name, size, p);
+
+	if (p)
+		bzero(p, size);
+	else if (uflags & MEMO_WARN)
+		printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size);
+
+	return p;
+}
+
+#define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN)
+
+/*
+**	Free a chunk previously obtained with __m_calloc()/__m_calloc2().
+**	'name' is only used for the optional debug trace.
+*/
+static void __m_free(m_pool_s *mp, void *ptr, int size, char *name)
+{
+	if (DEBUG_FLAGS & DEBUG_ALLOC)
+		printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
+
+	___m_free(mp, ptr, size);
+
+}
+
+/*
+ * With pci bus iommu support, we use a default pool of unmapped memory
+ * for memory we do not need to DMA from/to and one pool per pcidev for
+ * memory accessed by the PCI chip. `mp0' is the default not DMAable pool.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+static m_pool_s mp0;
+
+#else
+
+/*
+**	Cluster supplier for the default (non-DMA) pool mp0: grab pages
+**	from the kernel and count them in mp->nump on success.
+*/
+static m_addr_t ___mp0_getp(m_pool_s *mp)
+{
+	m_addr_t m = GetPages();
+	if (m)
+		++mp->nump;
+	return m;
+}
+
+/* Cluster releaser for pool mp0: return pages and decrement the count. */
+static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
+{
+	FreePages(m);
+	--mp->nump;
+}
+
+static m_pool_s mp0 = {0, ___mp0_getp, ___mp0_freep};
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+/*
+**	Zeroed allocation from the default (not DMAable) pool mp0,
+**	serialized by the driver-wide lock.
+*/
+static void *m_calloc(int size, char *name)
+{
+	u_long flags;
+	void *m;
+	NCR_LOCK_DRIVER(flags);
+	m = __m_calloc(&mp0, size, name);
+	NCR_UNLOCK_DRIVER(flags);
+	return m;
+}
+
+/* Free to the default pool mp0, serialized by the driver-wide lock. */
+static void m_free(void *ptr, int size, char *name)
+{
+	u_long flags;
+	NCR_LOCK_DRIVER(flags);
+	__m_free(&mp0, ptr, size, name);
+	NCR_UNLOCK_DRIVER(flags);
+}
+
+/*
+ * DMAable pools.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+/* Without pci bus iommu support, all the memory is assumed DMAable */
+
+#define __m_calloc_dma(b, s, n) m_calloc(s, n)
+#define __m_free_dma(b, p, s, n) m_free(p, s, n)
+#define __vtobus(b, p) virt_to_bus(p)
+
+#else
+
+/*
+ * With pci bus iommu support, we maintain one pool per pcidev and a
+ * hashed reverse table for virtual to bus physical address translations.
+ */
+/*
+**	Cluster supplier for a per-pcidev DMAable pool: allocate a
+**	consistent DMA cluster and record its virtual->bus translation
+**	in the pool's vtob hash so __vtobus() can find it later.
+**	Returns the kernel virtual address, or 0 on failure (in which
+**	case the temporary vtob entry is released).
+*/
+static m_addr_t ___dma_getp(m_pool_s *mp)
+{
+	m_addr_t vp;
+	m_vtob_s *vbp;
+
+	vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
+	if (vbp) {
+		dma_addr_t daddr;
+		vp = (m_addr_t) pci_alloc_consistent(mp->bush,
+						PAGE_SIZE<<MEMO_PAGE_ORDER,
+						&daddr);
+		if (vp) {
+			int hc = VTOB_HASH_CODE(vp);
+			vbp->vaddr = vp;
+			vbp->baddr = daddr;
+			vbp->next = mp->vtob[hc];
+			mp->vtob[hc] = vbp;
+			++mp->nump;
+			return vp;
+		}
+		else
+			__m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+	}
+	return 0;
+}
+
+/*
+**	Cluster releaser for a DMAable pool: look up the vtob hash entry
+**	for virtual address 'm', unlink it, free the consistent DMA
+**	memory and the hash entry, and decrement the cluster count.
+**	An address with no hash entry is silently ignored.
+*/
+static void ___dma_freep(m_pool_s *mp, m_addr_t m)
+{
+	m_vtob_s **vbpp, *vbp;
+	int hc = VTOB_HASH_CODE(m);
+
+	vbpp = &mp->vtob[hc];
+	while (*vbpp && (*vbpp)->vaddr != m)
+		vbpp = &(*vbpp)->next;
+	if (*vbpp) {
+		vbp = *vbpp;
+		*vbpp = (*vbpp)->next;
+		pci_free_consistent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
+				    (void *)vbp->vaddr, (dma_addr_t)vbp->baddr);
+		__m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+		--mp->nump;
+	}
+}
+
+/* Find the DMA pool attached to 'bush' in the mp0 chain, or NULL. */
+static inline m_pool_s *___get_dma_pool(m_bush_t bush)
+{
+	m_pool_s *mp;
+	for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next);
+	return mp;
+}
+
+/*
+**	Create a fresh DMA pool for 'bush', wire in the consistent-memory
+**	backends, and link it at the head of the mp0 chain.
+**	Returns the new pool or NULL on allocation failure.
+*/
+static m_pool_s *___cre_dma_pool(m_bush_t bush)
+{
+	m_pool_s *mp;
+	mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL");
+	if (mp) {
+		bzero(mp, sizeof(*mp));
+		mp->bush = bush;
+		mp->getp = ___dma_getp;
+		mp->freep = ___dma_freep;
+		mp->next = mp0.next;
+		mp0.next = mp;
+	}
+	return mp;
+}
+
+/* Unlink pool 'p' from the mp0 chain and free its descriptor. */
+static void ___del_dma_pool(m_pool_s *p)
+{
+	struct m_pool **pp = &mp0.next;
+
+	while (*pp && *pp != p)
+		pp = &(*pp)->next;
+	if (*pp) {
+		*pp = (*pp)->next;
+		__m_free(&mp0, p, sizeof(*p), "MPOOL");
+	}
+}
+
+/*
+**	Zeroed DMAable allocation for device 'bush': find (or lazily
+**	create) the per-device pool, allocate from it, and destroy the
+**	pool again if it ends up holding no clusters (e.g. the very
+**	allocation that motivated its creation failed).
+*/
+static void *__m_calloc_dma(m_bush_t bush, int size, char *name)
+{
+	u_long flags;
+	struct m_pool *mp;
+	void *m = 0;
+
+	NCR_LOCK_DRIVER(flags);
+	mp = ___get_dma_pool(bush);
+	if (!mp)
+		mp = ___cre_dma_pool(bush);
+	if (mp)
+		m = __m_calloc(mp, size, name);
+	if (mp && !mp->nump)
+		___del_dma_pool(mp);
+	NCR_UNLOCK_DRIVER(flags);
+
+	return m;
+}
+
+/*
+**	Free a DMAable chunk to the pool of device 'bush'; the pool is
+**	destroyed when its last cluster is released.
+*/
+static void __m_free_dma(m_bush_t bush, void *m, int size, char *name)
+{
+	u_long flags;
+	struct m_pool *mp;
+
+	NCR_LOCK_DRIVER(flags);
+	mp = ___get_dma_pool(bush);
+	if (mp)
+		__m_free(mp, m, size, name);
+	if (mp && !mp->nump)
+		___del_dma_pool(mp);
+	NCR_UNLOCK_DRIVER(flags);
+}
+
+/*
+**	Translate virtual address 'm' (allocated from the pool of 'bush')
+**	into a bus address: hash the enclosing cluster address, look up
+**	its vtob entry, and add back the intra-cluster offset.
+**	Returns 0 when no translation is recorded.
+*/
+static m_addr_t __vtobus(m_bush_t bush, void *m)
+{
+	u_long flags;
+	m_pool_s *mp;
+	int hc = VTOB_HASH_CODE(m);
+	m_vtob_s *vp = 0;
+	m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
+
+	NCR_LOCK_DRIVER(flags);
+	mp = ___get_dma_pool(bush);
+	if (mp) {
+		vp = mp->vtob[hc];
+		while (vp && (m_addr_t) vp->vaddr != a)
+			vp = vp->next;
+	}
+	NCR_UNLOCK_DRIVER(flags);
+	return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
+}
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+#define _m_calloc_dma(np, s, n) __m_calloc_dma(np->pdev, s, n)
+#define _m_free_dma(np, p, s, n) __m_free_dma(np->pdev, p, s, n)
+#define m_calloc_dma(s, n) _m_calloc_dma(np, s, n)
+#define m_free_dma(p, s, n) _m_free_dma(np, p, s, n)
+#define _vtobus(np, p) __vtobus(np->pdev, p)
+#define vtobus(p) _vtobus(np, p)
+
+/*
+ * Deal with DMA mapping/unmapping.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+/* Linux versions prior to pci bus iommu kernel interface */
+
+#define __unmap_scsi_data(pdev, cmd) do {; } while (0)
+#define __map_scsi_single_data(pdev, cmd) (__vtobus(pdev,(cmd)->request_buffer))
+#define __map_scsi_sg_data(pdev, cmd) ((cmd)->use_sg)
+#define __sync_scsi_data(pdev, cmd) do {; } while (0)
+
+#define scsi_sg_dma_address(sc) vtobus((sc)->address)
+#define scsi_sg_dma_len(sc) ((sc)->length)
+
+#else
+
+/* Linux version with pci bus iommu kernel interface */
+
+/* To keep track of the dma mapping (sg/single) that has been set */
+#define __data_mapped SCp.phase
+#define __data_mapping SCp.have_data_in
+
+/*
+**	Undo whatever DMA mapping was set up for 'cmd':
+**	__data_mapped == 2 means a scatter/gather mapping,
+**	__data_mapped == 1 means a single-buffer mapping,
+**	0 means nothing to do.  Resets the marker afterwards.
+*/
+static void __unmap_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+	int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+	switch(cmd->__data_mapped) {
+	case 2:
+		pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+		break;
+	case 1:
+		pci_unmap_single(pdev, cmd->__data_mapping,
+				 cmd->request_bufflen, dma_dir);
+		break;
+	}
+	cmd->__data_mapped = 0;
+}
+
+/*
+**	DMA-map the single data buffer of 'cmd' and remember the mapping
+**	in the command (__data_mapped = 1) so it can be unmapped/synced
+**	later.  Returns the bus address, or 0 for a zero-length transfer.
+*/
+static u_long __map_scsi_single_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+	dma_addr_t mapping;
+	int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+	if (cmd->request_bufflen == 0)
+		return 0;
+
+	mapping = pci_map_single(pdev, cmd->request_buffer,
+				 cmd->request_bufflen, dma_dir);
+	cmd->__data_mapped = 1;
+	cmd->__data_mapping = mapping;
+
+	return mapping;
+}
+
+/*
+**	DMA-map the scatter/gather list of 'cmd' and remember the mapping
+**	in the command (__data_mapped = 2).  Returns the number of mapped
+**	entries (possibly fewer than use_sg after iommu coalescing), or
+**	0 when the command carries no scatter/gather list.
+*/
+static int __map_scsi_sg_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+	int use_sg;
+	int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+	if (cmd->use_sg == 0)
+		return 0;
+
+	use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+	cmd->__data_mapped = 2;
+	cmd->__data_mapping = use_sg;
+
+	return use_sg;
+}
+
+/*
+**	Synchronize the CPU view of the DMA buffers of 'cmd' according to
+**	how they were mapped (sg vs. single); no-op when nothing mapped.
+*/
+static void __sync_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+	int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+	switch(cmd->__data_mapped) {
+	case 2:
+		pci_dma_sync_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+		break;
+	case 1:
+		pci_dma_sync_single(pdev, cmd->__data_mapping,
+				    cmd->request_bufflen, dma_dir);
+		break;
+	}
+}
+
+#define scsi_sg_dma_address(sc) sg_dma_address(sc)
+#define scsi_sg_dma_len(sc) sg_dma_len(sc)
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->pdev, cmd)
+#define map_scsi_single_data(np, cmd) __map_scsi_single_data(np->pdev, cmd)
+#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->pdev, cmd)
+#define sync_scsi_data(np, cmd) __sync_scsi_data(np->pdev, cmd)
+
+
+/*
+ * Print out some buffer.
+ */
+/* Dump 'n' bytes at 'p' to the kernel log as space-separated hex. */
+static void ncr_print_hex(u_char *p, int n)
+{
+	while (n-- > 0)
+		printk (" %x", *p++);
+}
+
+/* As ncr_print_hex(), but preceded by 'label' and finished with ".\n". */
+static void ncr_printl_hex(char *label, u_char *p, int n)
+{
+	printk("%s", label);
+	ncr_print_hex(p, n);
+	printk (".\n");
+}
+
+/*
+** Transfer direction
+**
+** Until some linux kernel version near 2.3.40, low-level scsi
+** drivers were not told about data transfer direction.
+** We check the existence of this feature that has been expected
+** for a _long_ time by all SCSI driver developers by just
+** testing against the definition of SCSI_DATA_UNKNOWN. Indeed
+** this is a hack, but testing against a kernel version would
+** have been a shame. ;-)
+*/
+#ifdef SCSI_DATA_UNKNOWN
+
+#define scsi_data_direction(cmd) (cmd->sc_data_direction)
+
+#else
+
+#define SCSI_DATA_UNKNOWN 0
+#define SCSI_DATA_WRITE 1
+#define SCSI_DATA_READ 2
+#define SCSI_DATA_NONE 3
+
+/*
+**	Fallback for kernels without sc_data_direction: guess the data
+**	transfer direction from the SCSI opcode.  Only the common READ
+**	and WRITE opcodes are classified; everything else is reported as
+**	SCSI_DATA_UNKNOWN (including, notably, commands with no data).
+*/
+static __inline__ int scsi_data_direction(Scsi_Cmnd *cmd)
+{
+	int direction;
+
+	switch((int) cmd->cmnd[0]) {
+	case 0x08:  /*	READ(6)				08 */
+	case 0x28:  /*	READ(10)			28 */
+	case 0xA8:  /*	READ(12)			A8 */
+		direction = SCSI_DATA_READ;
+		break;
+	case 0x0A:  /*	WRITE(6)			0A */
+	case 0x2A:  /*	WRITE(10)			2A */
+	case 0xAA:  /*	WRITE(12)			AA */
+		direction = SCSI_DATA_WRITE;
+		break;
+	default:
+		direction = SCSI_DATA_UNKNOWN;
+		break;
+	}
+
+	return direction;
+}
+
+#endif /* SCSI_DATA_UNKNOWN */
+
+/*
+** Head of list of NCR boards
+**
+** For kernel version < 1.3.70, host is retrieved by its irq level.
+** For later kernels, the internal host control block address
+** (struct ncb) is used as device id parameter of the irq stuff.
+*/
+
+static struct Scsi_Host *first_host = NULL;
+
+
+/*
+** /proc directory entry and proc_info function
+*/
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,3,27)
+/* Static /proc/scsi directory entry, needed before the 2.3.27 dynamic
+** procfs API.  NOTE(review): the 9 is presumably strlen(NAME53C8XX) —
+** keep in sync if the driver name ever changes. */
+static struct proc_dir_entry proc_scsi_sym53c8xx = {
+    PROC_SCSI_SYM53C8XX, 9, NAME53C8XX,
+    S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+#endif
+static int sym53c8xx_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int func);
+#endif
+
+/*
+** Driver setup.
+**
+** This structure is initialized from linux config options.
+** It can be overridden at boot-up by the boot command line.
+*/
+static struct ncr_driver_setup
+ driver_setup = SCSI_NCR_DRIVER_SETUP;
+
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+static struct ncr_driver_setup
+ driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
+# ifdef MODULE
+char *sym53c8xx = 0; /* command line passed by insmod */
+# if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,30)
+MODULE_PARM(sym53c8xx, "s");
+# endif
+# endif
+#endif
+
+/*
+** Other Linux definitions
+*/
+#define SetScsiResult(cmd, h_sts, s_sts) \
+ cmd->result = (((h_sts) << 16) + ((s_sts) & 0x7f))
+
+/* We may have to remind our amnesiac SCSI layer of the reason of the abort */
+#if 0
+#define SetScsiAbortResult(cmd) \
+ SetScsiResult( \
+ cmd, \
+ (cmd)->abort_reason == DID_TIME_OUT ? DID_TIME_OUT : DID_ABORT, \
+ 0xff)
+#else
+#define SetScsiAbortResult(cmd) SetScsiResult(cmd, DID_ABORT, 0xff)
+#endif
+
+static void sym53c8xx_select_queue_depths(
+ struct Scsi_Host *host, struct scsi_device *devlist);
+static void sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs);
+static void sym53c8xx_timeout(unsigned long np);
+
+#define initverbose (driver_setup.verbose)
+#define bootverbose (np->verbose)
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+static u_char Tekram_sync[16] __initdata =
+ {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10};
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Structures used by sym53c8xx_detect/sym53c8xx_pci_init to
+** transmit device configuration to the ncr_attach() function.
+*/
+/* Per-board hardware resources gathered at detection time and handed
+** to ncr_attach(): PCI location, BARs, io port and irq line. */
+typedef struct {
+	int	bus;
+	u_char	device_fn;
+	u_long	base;
+	u_long	base_2;
+	u_long	io_port;
+	int	irq;
+/* port and reg fields to use INB, OUTB macros */
+	u_long	base_io;
+	volatile struct ncr_reg	*reg;
+} ncr_slot;
+
+/* NVRAM image read from the board, tagged by layout; the union data
+** is only present when NVRAM support is compiled in. */
+typedef struct {
+	int type;
+#define	SCSI_NCR_SYMBIOS_NVRAM	(1)
+#define	SCSI_NCR_TEKRAM_NVRAM	(2)
+#ifdef	SCSI_NCR_NVRAM_SUPPORT
+	union {
+		Symbios_nvram Symbios;
+		Tekram_nvram Tekram;
+	} data;
+#endif
+} ncr_nvram;
+
+/*
+** Structure used by sym53c8xx_detect/sym53c8xx_pci_init
+** to save data on each detected board for ncr_attach().
+*/
+/* Everything sym53c8xx_pci_init() learns about one detected board,
+** consumed later by ncr_attach(). */
+typedef struct {
+	pcidev_t  pdev;
+	ncr_slot  slot;
+	ncr_chip  chip;
+	ncr_nvram *nvram;
+	u_char host_id;
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+	u_char pqs_pds;
+#endif
+	int attach_done;
+} ncr_device;
+
+/*==========================================================
+**
+** assert ()
+**
+**==========================================================
+**
+** modified copy from 386bsd:/usr/include/sys/assert.h
+**
+**----------------------------------------------------------
+*/
+
+#define assert(expression) { \
+ if (!(expression)) { \
+ (void)panic( \
+ "assertion \"%s\" failed: file \"%s\", line %d\n", \
+ #expression, \
+ __FILE__, __LINE__); \
+ } \
+}
+
+/*==========================================================
+**
+** Command control block states.
+**
+**==========================================================
+*/
+
+#define HS_IDLE (0)
+#define HS_BUSY (1)
+#define HS_NEGOTIATE (2) /* sync/wide data transfer*/
+#define HS_DISCONNECT (3) /* Disconnected by target */
+
+#define HS_DONEMASK (0x80)
+#define HS_COMPLETE (4|HS_DONEMASK)
+#define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */
+#define HS_RESET (6|HS_DONEMASK) /* SCSI reset */
+#define HS_ABORTED (7|HS_DONEMASK) /* Transfer aborted */
+#define HS_TIMEOUT (8|HS_DONEMASK) /* Software timeout */
+#define HS_FAIL (9|HS_DONEMASK) /* SCSI or PCI bus errors */
+#define HS_UNEXPECTED (10|HS_DONEMASK)/* Unexpected disconnect */
+
+#define DSA_INVALID 0xffffffff
+
+/*==========================================================
+**
+** Software Interrupt Codes
+**
+**==========================================================
+*/
+
+#define SIR_BAD_STATUS (1)
+#define SIR_SEL_ATN_NO_MSG_OUT (2)
+#define SIR_MSG_RECEIVED (3)
+#define SIR_MSG_WEIRD (4)
+#define SIR_NEGO_FAILED (5)
+#define SIR_NEGO_PROTO (6)
+#define SIR_SCRIPT_STOPPED (7)
+#define SIR_REJECT_TO_SEND (8)
+#define SIR_SWIDE_OVERRUN (9)
+#define SIR_SODL_UNDERRUN (10)
+#define SIR_RESEL_NO_MSG_IN (11)
+#define SIR_RESEL_NO_IDENTIFY (12)
+#define SIR_RESEL_BAD_LUN (13)
+#define SIR_TARGET_SELECTED (14)
+#define SIR_RESEL_BAD_I_T_L (15)
+#define SIR_RESEL_BAD_I_T_L_Q (16)
+#define SIR_ABORT_SENT (17)
+#define SIR_RESEL_ABORTED (18)
+#define SIR_MSG_OUT_DONE (19)
+#define SIR_AUTO_SENSE_DONE (20)
+#define SIR_DUMMY_INTERRUPT (21)
+#define SIR_DATA_OVERRUN (22)
+#define SIR_BAD_PHASE (23)
+#define SIR_MAX (23)
+
+/*==========================================================
+**
+** Extended error bits.
+** xerr_status field of struct ccb.
+**
+**==========================================================
+*/
+
+#define XE_EXTRA_DATA (1) /* unexpected data phase */
+#define XE_BAD_PHASE (2) /* illegal phase (4/5) */
+#define XE_PARITY_ERR (4) /* unrecovered SCSI parity error */
+#define XE_SODL_UNRUN (1<<3)
+#define XE_SWIDE_OVRUN (1<<4)
+
+/*==========================================================
+**
+** Negotiation status.
+** nego_status field of struct ccb.
+**
+**==========================================================
+*/
+
+#define NS_NOCHANGE (0)
+#define NS_SYNC (1)
+#define NS_WIDE (2)
+#define NS_PPR (4)
+
+/*==========================================================
+**
+** "Special features" of targets.
+** quirks field of struct tcb.
+** actualquirks field of struct ccb.
+**
+**==========================================================
+*/
+
+#define QUIRK_AUTOSAVE (0x01)
+
+/*==========================================================
+**
+** Capability bits in Inquire response byte 7.
+**
+**==========================================================
+*/
+
+#define INQ7_QUEUE (0x02)
+#define INQ7_SYNC (0x10)
+#define INQ7_WIDE16 (0x20)
+
+/*==========================================================
+**
+** A CCB hashed table is used to retrieve CCB address
+** from DSA value.
+**
+**==========================================================
+*/
+
+#define CCB_HASH_SHIFT 8
+#define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT)
+#define CCB_HASH_MASK (CCB_HASH_SIZE-1)
+#define CCB_HASH_CODE(dsa) (((dsa) >> 11) & CCB_HASH_MASK)
+
+/*==========================================================
+**
+** Declaration of structs.
+**
+**==========================================================
+*/
+
+struct tcb;
+struct lcb;
+struct ccb;
+struct ncb;
+struct script;
+
+typedef struct ncb * ncb_p;
+typedef struct tcb * tcb_p;
+typedef struct lcb * lcb_p;
+typedef struct ccb * ccb_p;
+
+struct link {
+ ncrcmd l_cmd;
+ ncrcmd l_paddr;
+};
+
+struct usrcmd {
+ u_long target;
+ u_long lun;
+ u_long data;
+ u_long cmd;
+};
+
+#define UC_SETSYNC 10
+#define UC_SETTAGS 11
+#define UC_SETDEBUG 12
+#define UC_SETORDER 13
+#define UC_SETWIDE 14
+#define UC_SETFLAG 15
+#define UC_SETVERBOSE 17
+#define UC_RESETDEV 18
+#define UC_CLEARDEV 19
+
+#define UF_TRACE (0x01)
+#define UF_NODISC (0x02)
+#define UF_NOSCAN (0x04)
+
+/*========================================================================
+**
+** Declaration of structs: target control block
+**
+**========================================================================
+*/
+struct tcb {
+	/*----------------------------------------------------------------
+	**	LUN tables.
+	**	An array of bus addresses is used on reselection by 
+	**	the SCRIPT.
+	**----------------------------------------------------------------
+	*/
+	u_int32		*luntbl;	/* lcbs bus address table	*/
+	u_int32		b_luntbl;	/* bus address of this table	*/
+	u_int32		b_lun0;		/* bus address of lun0		*/
+	lcb_p		l0p;		/* lcb of LUN #0 (normal case)	*/
+#if MAX_LUN > 1
+	lcb_p		*lmp;		/* Other lcb's [1..MAX_LUN]	*/
+#endif
+	/*----------------------------------------------------------------
+	**	Target capabilities.
+	**----------------------------------------------------------------
+	*/
+	u_char		inq_done;	/* Target capabilities received	*/
+	u_char		inq_byte7;	/* Contains these capabilities	*/
+
+	/*----------------------------------------------------------------
+	**	Some flags.
+	**----------------------------------------------------------------
+	*/
+	u_char		to_reset;	/* This target is to be reset	*/
+
+	/*----------------------------------------------------------------
+	**	Pointer to the ccb used for negotiation.
+	**	Prevent from starting a negotiation for all queued commands 
+	**	when tagged command queuing is enabled.
+	**----------------------------------------------------------------
+	*/
+	ccb_p   nego_cp;
+
+	/*----------------------------------------------------------------
+	**	negotiation of wide and synch transfer and device quirks.
+	**	sval, wval and uval are read from SCRIPTS and so have alignment 
+	**	constraints.
+	**	NOTE(review): the numeric markers below (0..3) appear to be
+	**	byte positions inside each script-accessed word — confirm
+	**	against the SCRIPTS code before reordering any field here.
+	**----------------------------------------------------------------
+	*/
+/*0*/	u_char	minsync;
+/*1*/	u_char	sval;
+/*2*/	u_short	period;
+/*0*/	u_char	maxoffs;
+/*1*/	u_char	quirks;
+/*2*/	u_char	widedone;
+/*3*/	u_char	wval;
+/*0*/	u_char	uval;
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+	u_char 	ic_min_sync;
+	u_char 	ic_max_width;
+	u_char 	ic_done;
+#endif
+	u_char 	ic_maximums_set;
+	u_char 	ppr_negotiation;
+
+	/*----------------------------------------------------------------
+	**	User settable limits and options.
+	**	These limits are read from the NVRAM if present.
+	**----------------------------------------------------------------
+	*/
+	u_char	usrsync;
+	u_char	usrwide;
+	u_short	usrtags;
+	u_char	usrflag;
+};
+
+/*========================================================================
+**
+** Declaration of structs: lun control block
+**
+**========================================================================
+*/
+struct lcb {
+	/*----------------------------------------------------------------
+	**	On reselection, SCRIPTS use this value as a JUMP address 
+	**	after the IDENTIFY has been successfully received.
+	**	This field is set to 'resel_tag' if TCQ is enabled and 
+	**	to 'resel_notag' if TCQ is disabled.
+	**	(Must be at zero due to bad lun handling on reselection)
+	**----------------------------------------------------------------
+	*/
+/*0*/	u_int32		resel_task;
+
+	/*----------------------------------------------------------------
+	**	Task table used by the script processor to retrieve the 
+	**	task corresponding to a reselected nexus. The TAG is used 
+	**	as offset to determine the corresponding entry.
+	**	Each entry contains the associated CCB bus address.
+	**----------------------------------------------------------------
+	*/
+	u_int32		tasktbl_0;	/* Used if TCQ not enabled	*/
+	u_int32		*tasktbl;
+	u_int32		b_tasktbl;
+
+	/*----------------------------------------------------------------
+	**	CCB queue management.
+	**----------------------------------------------------------------
+	*/
+	XPT_QUEHEAD	busy_ccbq;	/* Queue of busy CCBs		*/
+	XPT_QUEHEAD	wait_ccbq;	/* Queue of waiting for IO CCBs	*/
+	u_short		busyccbs;	/* CCBs busy for this lun	*/
+	u_short		queuedccbs;	/* CCBs queued to the controller*/
+	u_short		queuedepth;	/* Queue depth for this lun	*/
+	u_short		scdev_depth;	/* SCSI device queue depth	*/
+	u_short		maxnxs;		/* Max possible nexuses		*/
+
+	/*----------------------------------------------------------------
+	**	Control of tagged command queuing.
+	**	Tags allocation is performed using a circular buffer.
+	**	This avoids using a loop for tag allocation.
+	**	(ia_tag/if_tag are allocation/release cursors into cb_tags.)
+	**----------------------------------------------------------------
+	*/
+	u_short		ia_tag;		/* Tag allocation index		*/
+	u_short		if_tag;		/* Tag release index		*/
+	u_char		*cb_tags;	/* Circular tags buffer		*/
+	u_char		inq_byte7;	/* Store unit CmdQ capability	*/
+	u_char		usetags;	/* Command queuing is active	*/
+	u_char		to_clear;	/* User wants to clear all tasks*/
+	u_short		maxtags;	/* Max NR of tags asked by user	*/
+	u_short		numtags;	/* Current number of tags	*/
+
+	/*----------------------------------------------------------------
+	**	QUEUE FULL and ORDERED tag control.
+	**----------------------------------------------------------------
+	*/
+	u_short		num_good;	/* Nr of GOOD since QUEUE FULL	*/
+	u_short		tags_sum[2];	/* Tags sum counters		*/
+	u_char		tags_si;	/* Current index to tags sum	*/
+	u_long		tags_stime;	/* Last time we switch tags_sum	*/
+};
+
+/*========================================================================
+**
+** Declaration of structs: actions for a task.
+**
+**========================================================================
+**
+** It is part of the CCB and is called by the scripts processor to
+** start or restart the data structure (nexus).
+**
+**------------------------------------------------------------------------
+*/
+struct action {
+ u_int32 start;
+ u_int32 restart;
+};
+
+/*========================================================================
+**
+** Declaration of structs: Phase mismatch context.
+**
+**========================================================================
+**
+** It is part of the CCB and is used as parameters for the DATA
+** pointer. We need two contexts to handle correctly the SAVED
+** DATA POINTER.
+**
+**------------------------------------------------------------------------
+*/
+struct pm_ctx {
+ struct scr_tblmove sg; /* Updated interrupted SG block */
+ u_int32 ret; /* SCRIPT return address */
+};
+
+/*========================================================================
+**
+** Declaration of structs: global HEADER.
+**
+**========================================================================
+**
+** In earlier driver versions, this substructure was copied from the
+** ccb to a global address after selection (or reselection) and copied
+** back before disconnect. Since we are now using LOAD/STORE DSA
+** RELATIVE instructions, the script is able to access directly these
+** fields, and so, this header is no more copied.
+**
+**------------------------------------------------------------------------
+*/
+
+struct head {
+ /*----------------------------------------------------------------
+ ** Start and restart SCRIPTS addresses (must be at 0).
+ **----------------------------------------------------------------
+ */
+ struct action go;
+
+ /*----------------------------------------------------------------
+ ** Saved data pointer.
+ ** Points to the position in the script responsible for the
+ ** actual transfer of data.
+ ** It's written after reception of a SAVE_DATA_POINTER message.
+ ** The goalpointer points after the last transfer command.
+ **----------------------------------------------------------------
+ */
+ u_int32 savep;
+ u_int32 lastp;
+ u_int32 goalp;
+
+ /*----------------------------------------------------------------
+ ** Alternate data pointer.
+ ** They are copied back to savep/lastp/goalp by the SCRIPTS
+ ** when the direction is unknown and the device claims data out.
+ **----------------------------------------------------------------
+ */
+ u_int32 wlastp;
+ u_int32 wgoalp;
+
+ /*----------------------------------------------------------------
+ ** Status fields.
+ **----------------------------------------------------------------
+ */
+ u_char status[4]; /* host status */
+};
+
+/*
+** LUN control block lookup.
+** We use a direct pointer for LUN #0, and a table of pointers
+** which is only allocated for devices that support LUN(s) > 0.
+*/
+#if MAX_LUN <= 1
+#define ncr_lp(np, tp, lun) (!lun) ? (tp)->l0p : 0
+#else
+#define ncr_lp(np, tp, lun) \
+ (!lun) ? (tp)->l0p : (tp)->lmp ? (tp)->lmp[(lun)] : 0
+#endif
+
+/*
+** The status bytes are used by the host and the script processor.
+**
+** The four bytes (status[4]) are copied to the scratchb register
+** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect,
+** and copied back just after disconnecting.
+** Inside the script the XX_REG are used.
+*/
+
+/*
+** Last four bytes (script)
+*/
+#define QU_REG scr0
+#define HS_REG scr1
+#define HS_PRT nc_scr1
+#define SS_REG scr2
+#define SS_PRT nc_scr2
+#define HF_REG scr3
+#define HF_PRT nc_scr3
+
+/*
+** Last four bytes (host)
+*/
+#define actualquirks phys.header.status[0]
+#define host_status phys.header.status[1]
+#define scsi_status phys.header.status[2]
+#define host_flags phys.header.status[3]
+
+/*
+** Host flags
+*/
+#define HF_IN_PM0 1u
+#define HF_IN_PM1 (1u<<1)
+#define HF_ACT_PM (1u<<2)
+#define HF_DP_SAVED (1u<<3)
+#define HF_AUTO_SENSE (1u<<4)
+#define HF_DATA_IN (1u<<5)
+#define HF_PM_TO_C (1u<<6)
+#define HF_EXT_ERR (1u<<7)
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+#define HF_HINT_IARB (1u<<7)
+#endif
+
+/*
+** This one is stolen from QU_REG.:)
+*/
+#define HF_DATA_ST (1u<<7)
+
+/*==========================================================
+**
+** Declaration of structs: Data structure block
+**
+**==========================================================
+**
+** During execution of a ccb by the script processor,
+** the DSA (data structure address) register points
+** to this substructure of the ccb.
+** This substructure contains the header with
+ ** the script-processor-changeable data and
+** data blocks for the indirect move commands.
+**
+**----------------------------------------------------------
+*/
+
+struct dsb {
+
+ /*
+ ** Header.
+ */
+
+ struct head header;
+
+ /*
+ ** Table data for Script
+ */
+
+ struct scr_tblsel select;
+ struct scr_tblmove smsg ;
+ struct scr_tblmove smsg_ext ;
+ struct scr_tblmove cmd ;
+ struct scr_tblmove sense ;
+ struct scr_tblmove wresid;
+ struct scr_tblmove data [MAX_SCATTER];
+
+ /*
+ ** Phase mismatch contexts.
+ ** We need two to handle correctly the
+ ** SAVED DATA POINTER.
+ */
+
+ struct pm_ctx pm0;
+ struct pm_ctx pm1;
+};
+
+
+/*========================================================================
+**
+** Declaration of structs: Command control block.
+**
+**========================================================================
+*/
+struct ccb {
+ /*----------------------------------------------------------------
+ ** This is the data structure which is pointed by the DSA
+ ** register when it is executed by the script processor.
+ ** It must be the first entry.
+ **----------------------------------------------------------------
+ */
+ struct dsb phys;
+
+ /*----------------------------------------------------------------
+ ** The general SCSI driver provides a
+ ** pointer to a control block.
+ **----------------------------------------------------------------
+ */
+ Scsi_Cmnd *cmd; /* SCSI command */
+ u_char cdb_buf[16]; /* Copy of CDB */
+ u_char sense_buf[64];
+ int data_len; /* Total data length */
+ int segments; /* Number of SG segments */
+
+ /*----------------------------------------------------------------
+ ** Message areas.
+ ** We prepare a message to be sent after selection.
+ ** We may use a second one if the command is rescheduled
+ ** due to CHECK_CONDITION or QUEUE FULL status.
+ ** Contents are IDENTIFY and SIMPLE_TAG.
+ ** While negotiating sync or wide transfer,
+ ** a SDTR or WDTR message is appended.
+ **----------------------------------------------------------------
+ */
+ u_char scsi_smsg [12];
+ u_char scsi_smsg2[12];
+
+ /*----------------------------------------------------------------
+ ** Miscellaneous status'.
+ **----------------------------------------------------------------
+ */
+ u_char nego_status; /* Negotiation status */
+ u_char xerr_status; /* Extended error flags */
+ u_int32 extra_bytes; /* Extraneous bytes transferred */
+
+ /*----------------------------------------------------------------
+ ** Saved info for auto-sense
+ **----------------------------------------------------------------
+ */
+ u_char sv_scsi_status;
+ u_char sv_xerr_status;
+
+ /*----------------------------------------------------------------
+ ** Other fields.
+ **----------------------------------------------------------------
+ */
+ u_long p_ccb; /* BUS address of this CCB */
+ u_char sensecmd[6]; /* Sense command */
+ u_char to_abort; /* This CCB is to be aborted */
+ u_short tag; /* Tag for this transfer */
+ /* NO_TAG means no tag */
+ u_char tags_si; /* Lun tags sum index (0,1) */
+
+ u_char target;
+ u_char lun;
+ u_short queued;
+ ccb_p link_ccb; /* Host adapter CCB chain */
+ ccb_p link_ccbh; /* Host adapter CCB hash chain */
+ XPT_QUEHEAD link_ccbq; /* Link to unit CCB queue */
+ u_int32 startp; /* Initial data pointer */
+ u_int32 lastp0; /* Initial 'lastp' */
+ int ext_sg; /* Extreme data pointer, used */
+ int ext_ofs; /* to calculate the residual. */
+ int resid;
+};
+
+#define CCB_PHYS(cp,lbl) (cp->p_ccb + offsetof(struct ccb, lbl))
+
+
+/*========================================================================
+**
+** Declaration of structs: NCR device descriptor
+**
+**========================================================================
+*/
+struct ncb {
+ /*----------------------------------------------------------------
+ ** Idle task and invalid task actions and their bus
+ ** addresses.
+ **----------------------------------------------------------------
+ */
+ struct action idletask;
+ struct action notask;
+ struct action bad_i_t_l;
+ struct action bad_i_t_l_q;
+ u_long p_idletask;
+ u_long p_notask;
+ u_long p_bad_i_t_l;
+ u_long p_bad_i_t_l_q;
+
+ /*----------------------------------------------------------------
+ ** Dummy lun table to protect us against target returning bad
+ ** lun number on reselection.
+ **----------------------------------------------------------------
+ */
+ u_int32 *badluntbl; /* Table physical address */
+ u_int32 resel_badlun; /* SCRIPT handler BUS address */
+
+ /*----------------------------------------------------------------
+ ** Bit 32-63 of the on-chip RAM bus address in LE format.
+ ** The START_RAM64 script loads the MMRS and MMWS from this
+ ** field.
+ **----------------------------------------------------------------
+ */
+ u_int32 scr_ram_seg;
+
+ /*----------------------------------------------------------------
+ ** CCBs management queues.
+ **----------------------------------------------------------------
+ */
+ Scsi_Cmnd *waiting_list; /* Commands waiting for a CCB */
+ /* when lcb is not allocated. */
+ Scsi_Cmnd *done_list; /* Commands waiting for done() */
+ /* callback to be invoked. */
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+ spinlock_t smp_lock; /* Lock for SMP threading */
+#endif
+
+ /*----------------------------------------------------------------
+ ** Chip and controller identification.
+ **----------------------------------------------------------------
+ */
+ int unit; /* Unit number */
+ char chip_name[8]; /* Chip name */
+ char inst_name[16]; /* ncb instance name */
+
+ /*----------------------------------------------------------------
+ ** Initial value of some IO register bits.
+ ** These values are assumed to have been set by BIOS, and may
+ ** be used for probing adapter implementation differences.
+ **----------------------------------------------------------------
+ */
+ u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
+ sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_stest1, sv_scntl4;
+
+ /*----------------------------------------------------------------
+ ** Actual initial value of IO register bits used by the
+ ** driver. They are loaded at initialisation according to
+ ** features that are to be enabled.
+ **----------------------------------------------------------------
+ */
+ u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
+ rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;
+
+ /*----------------------------------------------------------------
+ ** Target data.
+ ** Target control block bus address array used by the SCRIPT
+ ** on reselection.
+ **----------------------------------------------------------------
+ */
+ struct tcb target[MAX_TARGET];
+ u_int32 *targtbl;
+
+ /*----------------------------------------------------------------
+ ** Virtual and physical bus addresses of the chip.
+ **----------------------------------------------------------------
+ */
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ u_long base_va; /* MMIO base virtual address */
+ u_long base2_va; /* On-chip RAM virtual address */
+#endif
+ u_long base_ba; /* MMIO base bus address */
+ u_long base_io; /* IO space base address */
+ u_long base_ws; /* (MM)IO window size */
+ u_long base2_ba; /* On-chip RAM bus address */
+ u_long base2_ws; /* On-chip RAM window size */
+ u_int irq; /* IRQ number */
+ volatile /* Pointer to volatile for */
+ struct ncr_reg *reg; /* memory mapped IO. */
+
+ /*----------------------------------------------------------------
+ ** SCRIPTS virtual and physical bus addresses.
+ ** 'script' is loaded in the on-chip RAM if present.
+ ** 'scripth' stays in main memory for all chips except the
+ ** 53C895A and 53C896 that provide 8K on-chip RAM.
+ **----------------------------------------------------------------
+ */
+ struct script *script0; /* Copies of script and scripth */
+ struct scripth *scripth0; /* relocated for this ncb. */
+ u_long p_script; /* Actual script and scripth */
+ u_long p_scripth; /* bus addresses. */
+ u_long p_scripth0;
+
+ /*----------------------------------------------------------------
+ ** General controller parameters and configuration.
+ **----------------------------------------------------------------
+ */
+ pcidev_t pdev;
+ u_short device_id; /* PCI device id */
+ u_char revision_id; /* PCI device revision id */
+ u_char bus; /* PCI BUS number */
+ u_char device_fn; /* PCI BUS device and function */
+ u_char myaddr; /* SCSI id of the adapter */
+ u_char maxburst; /* log base 2 of dwords burst */
+ u_char maxwide; /* Maximum transfer width */
+ u_char minsync; /* Minimum sync period factor */
+ u_char maxsync; /* Maximum sync period factor */
+ u_char maxoffs; /* Max scsi offset */
+ u_char multiplier; /* Clock multiplier (1,2,4) */
+ u_char clock_divn; /* Number of clock divisors */
+ u_long clock_khz; /* SCSI clock frequency in KHz */
+ u_int features; /* Chip features map */
+
+ /*----------------------------------------------------------------
+ ** Range for the PCI clock frequency measurement result
+ ** that ensures the algorithm used by the driver can be
+ ** trusted for the SCSI clock frequency measurement.
+ ** (Assuming a PCI clock frequency of 33 MHz).
+ **----------------------------------------------------------------
+ */
+ u_int pciclock_min;
+ u_int pciclock_max;
+
+ /*----------------------------------------------------------------
+ ** Start queue management.
+ ** It is filled up by the host processor and accessed by the
+ ** SCRIPTS processor in order to start SCSI commands.
+ **----------------------------------------------------------------
+ */
+ u_long p_squeue; /* Start queue BUS address */
+ u_int32 *squeue; /* Start queue virtual address */
+ u_short squeueput; /* Next free slot of the queue */
+ u_short actccbs; /* Number of allocated CCBs */
+ u_short queuedepth; /* Start queue depth */
+
+ /*----------------------------------------------------------------
+ ** Command completion queue.
+ ** It is the same size as the start queue to avoid overflow.
+ **----------------------------------------------------------------
+ */
+ u_short dqueueget; /* Next position to scan */
+ u_int32 *dqueue; /* Completion (done) queue */
+
+ /*----------------------------------------------------------------
+ ** Timeout handler.
+ **----------------------------------------------------------------
+ */
+ struct timer_list timer; /* Timer handler link header */
+ u_long lasttime;
+ u_long settle_time; /* Resetting the SCSI BUS */
+
+ /*----------------------------------------------------------------
+ ** Debugging and profiling.
+ **----------------------------------------------------------------
+ */
+ struct ncr_reg regdump; /* Register dump */
+ u_long regtime; /* Time it has been done */
+
+ /*----------------------------------------------------------------
+ ** Miscellaneous buffers accessed by the scripts-processor.
+ ** They shall be DWORD aligned, because they may be read or
+ ** written with a script command.
+ **----------------------------------------------------------------
+ */
+ u_char msgout[12]; /* Buffer for MESSAGE OUT */
+ u_char msgin [12]; /* Buffer for MESSAGE IN */
+ u_int32 lastmsg; /* Last SCSI message sent */
+ u_char scratch; /* Scratch for SCSI receive */
+
+ /*----------------------------------------------------------------
+ ** Miscellaneous configuration and status parameters.
+ **----------------------------------------------------------------
+ */
+ u_char scsi_mode; /* Current SCSI BUS mode */
+ u_char order; /* Tag order to use */
+ u_char verbose; /* Verbosity for this controller*/
+ u_int32 ncr_cache; /* Used for cache test at init. */
+ u_long p_ncb; /* BUS address of this NCB */
+
+ /*----------------------------------------------------------------
+ ** CCB lists and queue.
+ **----------------------------------------------------------------
+ */
+ ccb_p ccbh[CCB_HASH_SIZE]; /* CCB hashed by DSA value */
+ struct ccb *ccbc; /* CCB chain */
+ XPT_QUEHEAD free_ccbq; /* Queue of available CCBs */
+
+ /*----------------------------------------------------------------
+ ** IMMEDIATE ARBITRATION (IARB) control.
+ ** We keep track in 'last_cp' of the last CCB that has been
+ ** queued to the SCRIPTS processor and clear 'last_cp' when
+ ** this CCB completes. If last_cp is not zero at the moment
+ ** we queue a new CCB, we set a flag in 'last_cp' that is
+ ** used by the SCRIPTS as a hint for setting IARB.
+ ** We do not set more than 'iarb_max' consecutive hints for
+ ** IARB in order to leave devices a chance to reselect.
+ ** By the way, any non zero value of 'iarb_max' is unfair. :)
+ **----------------------------------------------------------------
+ */
+#ifdef SCSI_NCR_IARB_SUPPORT
+ struct ccb *last_cp; /* Last queued CCB used for IARB */
+ u_short iarb_max; /* Max. # consecutive IARB hints*/
+ u_short iarb_count; /* Actual # of these hints */
+#endif
+
+ /*----------------------------------------------------------------
+ ** We need the LCB in order to handle disconnections and
+ ** to count active CCBs for task management. So, we use
+ ** a unique CCB for LUNs we do not have the LCB yet.
+ ** This queue normally should have at most 1 element.
+ **----------------------------------------------------------------
+ */
+ XPT_QUEHEAD b0_ccbq;
+
+ /*----------------------------------------------------------------
+ ** We use a different scatter function for 896 rev 1.
+ **----------------------------------------------------------------
+ */
+ int (*scatter) (ncb_p, ccb_p, Scsi_Cmnd *);
+
+ /*----------------------------------------------------------------
+ ** Command abort handling.
+ ** We need to synchronize tightly with the SCRIPTS
+ ** processor in order to handle things correctly.
+ **----------------------------------------------------------------
+ */
+ u_char abrt_msg[4]; /* Message to send buffer */
+ struct scr_tblmove abrt_tbl; /* Table for the MOV of it */
+ struct scr_tblsel abrt_sel; /* Sync params for selection */
+ u_char istat_sem; /* Tells the chip to stop (SEM) */
+
+ /*----------------------------------------------------------------
+ ** Fields that should be removed or changed.
+ **----------------------------------------------------------------
+ */
+ struct usrcmd user; /* Command from user */
+ volatile u_char release_stage; /* Synchronisation stage on release */
+
+ /*----------------------------------------------------------------
+ ** Fields that are used (primarily) for integrity check
+ **----------------------------------------------------------------
+ */
+ unsigned char check_integrity; /* Enable midlayer integ. check on
+ * bus scan. */
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+ unsigned char check_integ_par; /* Set if par or Init. Det. error
+ * used only during integ check */
+#endif
+};
+
+#define NCB_PHYS(np, lbl) (np->p_ncb + offsetof(struct ncb, lbl))
+#define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl))
+#define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth,lbl))
+#define NCB_SCRIPTH0_PHYS(np,lbl) (np->p_scripth0+offsetof (struct scripth,lbl))
+
+/*==========================================================
+**
+**
+** Script for NCR-Processor.
+**
+** Use ncr_script_fill() to create the variable parts.
+** Use ncr_script_copy_and_bind() to make a copy and
+** bind to physical addresses.
+**
+**
+**==========================================================
+**
+** We have to know the offsets of all labels before
+** we reach them (for forward jumps).
+** Therefore we declare a struct here.
+** If you make changes inside the script,
+** DONT FORGET TO CHANGE THE LENGTHS HERE!
+**
+**----------------------------------------------------------
+*/
+
+/*
+** Script fragments which are loaded into the on-chip RAM
+** of 825A, 875, 876, 895, 895A and 896 chips.
+*/
+struct script {
+ ncrcmd start [ 14];
+ ncrcmd getjob_begin [ 4];
+ ncrcmd getjob_end [ 4];
+ ncrcmd select [ 8];
+ ncrcmd wf_sel_done [ 2];
+ ncrcmd send_ident [ 2];
+#ifdef SCSI_NCR_IARB_SUPPORT
+ ncrcmd select2 [ 8];
+#else
+ ncrcmd select2 [ 2];
+#endif
+ ncrcmd command [ 2];
+ ncrcmd dispatch [ 28];
+ ncrcmd sel_no_cmd [ 10];
+ ncrcmd init [ 6];
+ ncrcmd clrack [ 4];
+ ncrcmd disp_status [ 4];
+ ncrcmd datai_done [ 26];
+ ncrcmd datao_done [ 12];
+ ncrcmd ign_i_w_r_msg [ 4];
+ ncrcmd datai_phase [ 2];
+ ncrcmd datao_phase [ 4];
+ ncrcmd msg_in [ 2];
+ ncrcmd msg_in2 [ 10];
+#ifdef SCSI_NCR_IARB_SUPPORT
+ ncrcmd status [ 14];
+#else
+ ncrcmd status [ 10];
+#endif
+ ncrcmd complete [ 8];
+#ifdef SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+ ncrcmd complete2 [ 12];
+#else
+ ncrcmd complete2 [ 10];
+#endif
+#ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR
+ ncrcmd done [ 18];
+#else
+ ncrcmd done [ 14];
+#endif
+ ncrcmd done_end [ 2];
+ ncrcmd save_dp [ 8];
+ ncrcmd restore_dp [ 4];
+ ncrcmd disconnect [ 20];
+#ifdef SCSI_NCR_IARB_SUPPORT
+ ncrcmd idle [ 4];
+#else
+ ncrcmd idle [ 2];
+#endif
+#ifdef SCSI_NCR_IARB_SUPPORT
+ ncrcmd ungetjob [ 6];
+#else
+ ncrcmd ungetjob [ 4];
+#endif
+ ncrcmd reselect [ 4];
+ ncrcmd reselected [ 20];
+ ncrcmd resel_scntl4 [ 30];
+#if MAX_TASKS*4 > 512
+ ncrcmd resel_tag [ 18];
+#elif MAX_TASKS*4 > 256
+ ncrcmd resel_tag [ 12];
+#else
+ ncrcmd resel_tag [ 8];
+#endif
+ ncrcmd resel_go [ 6];
+ ncrcmd resel_notag [ 2];
+ ncrcmd resel_dsa [ 8];
+ ncrcmd data_in [MAX_SCATTER * SCR_SG_SIZE];
+ ncrcmd data_in2 [ 4];
+ ncrcmd data_out [MAX_SCATTER * SCR_SG_SIZE];
+ ncrcmd data_out2 [ 4];
+ ncrcmd pm0_data [ 12];
+ ncrcmd pm0_data_out [ 6];
+ ncrcmd pm0_data_end [ 6];
+ ncrcmd pm1_data [ 12];
+ ncrcmd pm1_data_out [ 6];
+ ncrcmd pm1_data_end [ 6];
+};
+
+/*
+** Script fragments which stay in main memory for all chips
+** except for the 895A and 896 that support 8K on-chip RAM.
+*/
+struct scripth {
+ ncrcmd start64 [ 2];
+ ncrcmd no_data [ 2];
+ ncrcmd sel_for_abort [ 18];
+ ncrcmd sel_for_abort_1 [ 2];
+ ncrcmd select_no_atn [ 8];
+ ncrcmd wf_sel_done_no_atn [ 4];
+
+ ncrcmd msg_in_etc [ 14];
+ ncrcmd msg_received [ 4];
+ ncrcmd msg_weird_seen [ 4];
+ ncrcmd msg_extended [ 20];
+ ncrcmd msg_bad [ 6];
+ ncrcmd msg_weird [ 4];
+ ncrcmd msg_weird1 [ 8];
+
+ ncrcmd wdtr_resp [ 6];
+ ncrcmd send_wdtr [ 4];
+ ncrcmd sdtr_resp [ 6];
+ ncrcmd send_sdtr [ 4];
+ ncrcmd ppr_resp [ 6];
+ ncrcmd send_ppr [ 4];
+ ncrcmd nego_bad_phase [ 4];
+ ncrcmd msg_out [ 4];
+ ncrcmd msg_out_done [ 4];
+ ncrcmd data_ovrun [ 2];
+ ncrcmd data_ovrun1 [ 22];
+ ncrcmd data_ovrun2 [ 8];
+ ncrcmd abort_resel [ 16];
+ ncrcmd resend_ident [ 4];
+ ncrcmd ident_break [ 4];
+ ncrcmd ident_break_atn [ 4];
+ ncrcmd sdata_in [ 6];
+ ncrcmd data_io [ 2];
+ ncrcmd data_io_com [ 8];
+ ncrcmd data_io_out [ 12];
+ ncrcmd resel_bad_lun [ 4];
+ ncrcmd bad_i_t_l [ 4];
+ ncrcmd bad_i_t_l_q [ 4];
+ ncrcmd bad_status [ 6];
+ ncrcmd tweak_pmj [ 12];
+ ncrcmd pm_handle [ 20];
+ ncrcmd pm_handle1 [ 4];
+ ncrcmd pm_save [ 4];
+ ncrcmd pm0_save [ 14];
+ ncrcmd pm1_save [ 14];
+
+ /* WSR handling */
+#ifdef SYM_DEBUG_PM_WITH_WSR
+ ncrcmd pm_wsr_handle [ 44];
+#else
+ ncrcmd pm_wsr_handle [ 42];
+#endif
+ ncrcmd wsr_ma_helper [ 4];
+
+ /* Data area */
+ ncrcmd zero [ 1];
+ ncrcmd scratch [ 1];
+ ncrcmd scratch1 [ 1];
+ ncrcmd pm0_data_addr [ 1];
+ ncrcmd pm1_data_addr [ 1];
+ ncrcmd saved_dsa [ 1];
+ ncrcmd saved_drs [ 1];
+ ncrcmd done_pos [ 1];
+ ncrcmd startpos [ 1];
+ ncrcmd targtbl [ 1];
+ /* End of data area */
+
+#ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ ncrcmd start_ram [ 1];
+ ncrcmd script0_ba [ 4];
+ ncrcmd start_ram64 [ 3];
+ ncrcmd script0_ba64 [ 3];
+ ncrcmd scripth0_ba64 [ 6];
+ ncrcmd ram_seg64 [ 1];
+#endif
+ ncrcmd snooptest [ 6];
+ ncrcmd snoopend [ 2];
+};
+
+/*==========================================================
+**
+**
+** Function headers.
+**
+**
+**==========================================================
+*/
+
+static ccb_p ncr_alloc_ccb (ncb_p np);
+static void ncr_complete (ncb_p np, ccb_p cp);
+static void ncr_exception (ncb_p np);
+static void ncr_free_ccb (ncb_p np, ccb_p cp);
+static ccb_p ncr_ccb_from_dsa(ncb_p np, u_long dsa);
+static void ncr_init_tcb (ncb_p np, u_char tn);
+static lcb_p ncr_alloc_lcb (ncb_p np, u_char tn, u_char ln);
+static lcb_p ncr_setup_lcb (ncb_p np, u_char tn, u_char ln,
+ u_char *inq_data);
+static void ncr_getclock (ncb_p np, int mult);
+static u_int ncr_getpciclock (ncb_p np);
+static void ncr_selectclock (ncb_p np, u_char scntl3);
+static ccb_p ncr_get_ccb (ncb_p np, u_char tn, u_char ln);
+static void ncr_init (ncb_p np, int reset, char * msg, u_long code);
+static void ncr_int_sbmc (ncb_p np);
+static void ncr_int_par (ncb_p np, u_short sist);
+static void ncr_int_ma (ncb_p np);
+static void ncr_int_sir (ncb_p np);
+static void ncr_int_sto (ncb_p np);
+static void ncr_int_udc (ncb_p np);
+static void ncr_negotiate (ncb_p np, tcb_p tp);
+static int ncr_prepare_nego(ncb_p np, ccb_p cp, u_char *msgptr);
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+static int ncr_ic_nego(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd, u_char *msgptr);
+#endif
+static void ncr_script_copy_and_bind
+ (ncb_p np, ncrcmd *src, ncrcmd *dst, int len);
+static void ncr_script_fill (struct script * scr, struct scripth * scripth);
+static int ncr_scatter_896R1 (ncb_p np, ccb_p cp, Scsi_Cmnd *cmd);
+static int ncr_scatter (ncb_p np, ccb_p cp, Scsi_Cmnd *cmd);
+static void ncr_getsync (ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p);
+static void ncr_get_xfer_info(ncb_p np, tcb_p tp, u_char *factor, u_char *offset, u_char *width);
+static void ncr_setsync (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer, u_char scntl4);
+static void ncr_set_sync_wide_status (ncb_p np, u_char target);
+static void ncr_setup_tags (ncb_p np, u_char tn, u_char ln);
+static void ncr_setwide (ncb_p np, ccb_p cp, u_char wide, u_char ack);
+static void ncr_setsyncwide (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer, u_char scntl4, u_char wide);
+static int ncr_show_msg (u_char * msg);
+static void ncr_print_msg (ccb_p cp, char *label, u_char * msg);
+static int ncr_snooptest (ncb_p np);
+static void ncr_timeout (ncb_p np);
+static void ncr_wakeup (ncb_p np, u_long code);
+static int ncr_wakeup_done (ncb_p np);
+static void ncr_start_next_ccb (ncb_p np, lcb_p lp, int maxn);
+static void ncr_put_start_queue(ncb_p np, ccb_p cp);
+static void ncr_chip_reset (ncb_p np);
+static void ncr_soft_reset (ncb_p np);
+static void ncr_start_reset (ncb_p np);
+static int ncr_reset_scsi_bus (ncb_p np, int enab_int, int settle_delay);
+static int ncr_compute_residual (ncb_p np, ccb_p cp);
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+static void ncr_usercmd (ncb_p np);
+#endif
+
+static int ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device);
+static void ncr_free_resources(ncb_p np);
+
+static void insert_into_waiting_list(ncb_p np, Scsi_Cmnd *cmd);
+static Scsi_Cmnd *retrieve_from_waiting_list(int to_remove, ncb_p np, Scsi_Cmnd *cmd);
+static void process_waiting_list(ncb_p np, int sts);
+
+#define remove_from_waiting_list(np, cmd) \
+ retrieve_from_waiting_list(1, (np), (cmd))
+#define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
+#define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+static void ncr_get_nvram (ncr_device *devp, ncr_nvram *nvp);
+static int sym_read_Tekram_nvram (ncr_slot *np, u_short device_id,
+ Tekram_nvram *nvram);
+static int sym_read_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram);
+#endif
+
+/*==========================================================
+**
+**
+** Global static data.
+**
+**
+**==========================================================
+*/
+
+static inline char *ncr_name (ncb_p np)
+{
+ return np->inst_name;
+}
+
+
+/*==========================================================
+**
+**
+** Scripts for NCR-Processor.
+**
+** Use ncr_script_bind for binding to physical addresses.
+**
+**
+**==========================================================
+**
+** NADDR generates a reference to a field of the controller data.
+** PADDR generates a reference to another part of the script.
+** RADDR generates a reference to a script processor register.
+** FADDR generates a reference to a script processor register
+** with offset.
+**
+**----------------------------------------------------------
+*/
+
+#define RELOC_SOFTC 0x40000000
+#define RELOC_LABEL 0x50000000
+#define RELOC_REGISTER 0x60000000
+#if 0
+#define RELOC_KVAR 0x70000000
+#endif
+#define RELOC_LABELH 0x80000000
+#define RELOC_MASK 0xf0000000
+
+#define NADDR(label) (RELOC_SOFTC | offsetof(struct ncb, label))
+#define PADDR(label) (RELOC_LABEL | offsetof(struct script, label))
+#define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label))
+#define RADDR(label) (RELOC_REGISTER | REG(label))
+#define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs)))
+#define KVAR(which) (RELOC_KVAR | (which))
+
+#define SCR_DATA_ZERO 0xf00ff00f
+
+#ifdef RELOC_KVAR
+#define SCRIPT_KVAR_JIFFIES (0)
+#define SCRIPT_KVAR_FIRST SCRIPT_KVAR_JIFFIES
+#define SCRIPT_KVAR_LAST SCRIPT_KVAR_JIFFIES
+/*
+ * Kernel variables referenced in the scripts.
+ * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY.
+ */
+static void *script_kvars[] __initdata =
+ { (void *)&jiffies };
+#endif
+
+static struct script script0 __initdata = {
+/*--------------------------< START >-----------------------*/ {
+ /*
+ ** This NOP will be patched with LED ON
+ ** SCR_REG_REG (gpreg, SCR_AND, 0xfe)
+ */
+ SCR_NO_OP,
+ 0,
+ /*
+ ** Clear SIGP.
+ */
+ SCR_FROM_REG (ctest2),
+ 0,
+
+ /*
+ ** Stop here if the C code wants to perform
+ ** some error recovery procedure manually.
+ ** (Indicate this by setting SEM in ISTAT)
+ */
+ SCR_FROM_REG (istat),
+ 0,
+ /*
+ ** Report to the C code the next position in
+ ** the start queue the SCRIPTS will schedule.
+ ** The C code must not change SCRATCHA.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (startpos),
+ SCR_INT ^ IFTRUE (MASK (SEM, SEM)),
+ SIR_SCRIPT_STOPPED,
+
+ /*
+ ** Start the next job.
+ **
+ ** @DSA = start point for this job.
+ ** SCRATCHA = address of this job in the start queue.
+ **
+ ** We will restore startpos with SCRATCHA if we fail the
+ ** arbitration or if it is the idle job.
+ **
+ ** The below GETJOB_BEGIN to GETJOB_END section of SCRIPTS
+ ** is a critical path. If it is partially executed, it then
+ ** may happen that the job address is not yet in the DSA
+ ** and the next queue position points to the next JOB.
+ */
+ SCR_LOAD_ABS (dsa, 4),
+ PADDRH (startpos),
+ SCR_LOAD_REL (temp, 4),
+ 4,
+}/*-------------------------< GETJOB_BEGIN >------------------*/,{
+ SCR_STORE_ABS (temp, 4),
+ PADDRH (startpos),
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+}/*-------------------------< GETJOB_END >--------------------*/,{
+ SCR_LOAD_REL (temp, 4),
+ 0,
+ SCR_RETURN,
+ 0,
+
+}/*-------------------------< SELECT >----------------------*/,{
+ /*
+ ** DSA contains the address of a scheduled
+ ** data structure.
+ **
+ ** SCRATCHA contains the address of the start queue
+ ** entry which points to the next job.
+ **
+ ** Set Initiator mode.
+ **
+ ** (Target mode is left as an exercise for the reader)
+ */
+
+ SCR_CLR (SCR_TRG),
+ 0,
+ /*
+ ** And try to select this target.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select),
+ PADDR (ungetjob),
+ /*
+ ** Now there are 4 possibilities:
+ **
+	** (1) The ncr loses arbitration.
+ ** This is ok, because it will try again,
+ ** when the bus becomes idle.
+ ** (But beware of the timeout function!)
+ **
+ ** (2) The ncr is reselected.
+ ** Then the script processor takes the jump
+ ** to the RESELECT label.
+ **
+ ** (3) The ncr wins arbitration.
+ ** Then it will execute SCRIPTS instruction until
+ ** the next instruction that checks SCSI phase.
+ ** Then will stop and wait for selection to be
+ ** complete or selection time-out to occur.
+ **
+ ** After having won arbitration, the ncr SCRIPTS
+ ** processor is able to execute instructions while
+ ** the SCSI core is performing SCSI selection. But
+ ** some script instruction that is not waiting for
+ ** a valid phase (or selection timeout) to occur
+ ** breaks the selection procedure, by probably
+ ** affecting timing requirements.
+ ** So we have to wait immediately for the next phase
+ ** or the selection to complete or time-out.
+ */
+
+ /*
+ ** load the savep (saved pointer) into
+ ** the actual data pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_LOAD_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+
+}/*-------------------------< WF_SEL_DONE >----------------------*/,{
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ SIR_SEL_ATN_NO_MSG_OUT,
+}/*-------------------------< SEND_IDENT >----------------------*/,{
+ /*
+ ** Selection complete.
+ ** Send the IDENTIFY and SIMPLE_TAG messages
+ ** (and the M_X_SYNC_REQ / M_X_WIDE_REQ message)
+ */
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct dsb, smsg),
+}/*-------------------------< SELECT2 >----------------------*/,{
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** Set IMMEDIATE ARBITRATION if we have been given
+ ** a hint to do so. (Some job to do after this one).
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)),
+ 8,
+ SCR_REG_REG (scntl1, SCR_OR, IARB),
+ 0,
+#endif
+ /*
+ ** Anticipate the COMMAND phase.
+ ** This is the PHASE we expect at this point.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)),
+ PADDR (sel_no_cmd),
+
+}/*-------------------------< COMMAND >--------------------*/,{
+ /*
+ ** ... and send the command
+ */
+ SCR_MOVE_TBL ^ SCR_COMMAND,
+ offsetof (struct dsb, cmd),
+
+}/*-----------------------< DISPATCH >----------------------*/,{
+ /*
+ ** MSG_IN is the only phase that shall be
+ ** entered at least once for each (re)selection.
+ ** So we test it first.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR (msg_in),
+ SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)),
+ PADDR (datao_phase),
+ SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)),
+ PADDR (datai_phase),
+ SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
+ PADDR (status),
+ SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
+ PADDR (command),
+ SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
+ PADDRH (msg_out),
+ /*
+ * Discard as many illegal phases as
+ * required and tell the C code about.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_OUT)),
+ 16,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
+ NADDR (scratch),
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_OUT)),
+ -16,
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_IN)),
+ 16,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
+ NADDR (scratch),
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_IN)),
+ -16,
+ SCR_INT,
+ SIR_BAD_PHASE,
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*---------------------< SEL_NO_CMD >----------------------*/,{
+ /*
+ ** The target does not switch to command
+ ** phase after IDENTIFY has been sent.
+ **
+ ** If it stays in MSG OUT phase send it
+ ** the IDENTIFY again.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDRH (resend_ident),
+ /*
+ ** If target does not switch to MSG IN phase
+ ** and we sent a negotiation, assert the
+ ** failure immediately.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ SCR_FROM_REG (HS_REG),
+ 0,
+ SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+ SIR_NEGO_FAILED,
+ /*
+ ** Jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< INIT >------------------------*/,{
+ /*
+ ** Wait for the SCSI RESET signal to be
+ ** inactive before restarting operations,
+ ** since the chip may hang on SEL_ATN
+ ** if SCSI RESET is active.
+ */
+ SCR_FROM_REG (sstat0),
+ 0,
+ SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)),
+ -16,
+ SCR_JUMP,
+ PADDR (start),
+}/*-------------------------< CLRACK >----------------------*/,{
+ /*
+ ** Terminate possible pending message phase.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< DISP_STATUS >----------------------*/,{
+ /*
+ ** Anticipate STATUS phase.
+ **
+ ** Does spare 3 SCRIPTS instructions when we have
+ ** completed the INPUT of the data.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
+ PADDR (status),
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< DATAI_DONE >-------------------*/,{
+ /*
+ * If the device wants us to send more data,
+ * we must count the extra bytes.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+ ** If the SWIDE is not full, jump to dispatcher.
+ ** We anticipate a STATUS phase.
+ ** If we get later an IGNORE WIDE RESIDUE, we
+ ** will alias it as a MODIFY DP (-1).
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMP ^ IFFALSE (MASK (WSR, WSR)),
+ PADDR (disp_status),
+ /*
+ ** The SWIDE is full.
+ ** Clear this condition.
+ */
+ SCR_REG_REG (scntl2, SCR_OR, WSR),
+ 0,
+ /*
+ * We are expecting an IGNORE RESIDUE message
+ * from the device, otherwise we are in data
+ * overrun condition. Check against MSG_IN phase.
+ */
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ SIR_SWIDE_OVERRUN,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (disp_status),
+ /*
+ * We are in MSG_IN phase,
+ * Read the first byte of the message.
+ * If it is not an IGNORE RESIDUE message,
+ * signal overrun and jump to message
+ * processing.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[0]),
+ SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+ SIR_SWIDE_OVERRUN,
+ SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+ PADDR (msg_in2),
+
+ /*
+ * We got the message we expected.
+ * Read the 2nd byte, and jump to dispatcher.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP,
+ PADDR (disp_status),
+
+}/*-------------------------< DATAO_DONE >-------------------*/,{
+ /*
+ * If the device wants us to send more data,
+ * we must count the extra bytes.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+ PADDRH (data_ovrun),
+ /*
+ ** If the SODL is not full jump to dispatcher.
+ ** We anticipate a MSG IN phase or a STATUS phase.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMP ^ IFFALSE (MASK (WSS, WSS)),
+ PADDR (disp_status),
+ /*
+ ** The SODL is full, clear this condition.
+ */
+ SCR_REG_REG (scntl2, SCR_OR, WSS),
+ 0,
+ /*
+ ** And signal a DATA UNDERRUN condition
+ ** to the C code.
+ */
+ SCR_INT,
+ SIR_SODL_UNDERRUN,
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< IGN_I_W_R_MSG >--------------*/,{
+ /*
+ ** We jump here from the phase mismatch interrupt,
+ ** When we have a SWIDE and the device has presented
+ ** a IGNORE WIDE RESIDUE message on the BUS.
+ ** We just have to throw away this message and then
+ ** to jump to dispatcher.
+ */
+ SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+ NADDR (scratch),
+ /*
+ ** Clear ACK and jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< DATAI_PHASE >------------------*/,{
+ SCR_RETURN,
+ 0,
+}/*-------------------------< DATAO_PHASE >------------------*/,{
+ /*
+ ** Patch for 53c1010_66 only - to allow A0 part
+ ** to operate properly in a 33MHz PCI bus.
+ **
+ ** SCR_REG_REG(scntl4, SCR_OR, 0x0c),
+ ** 0,
+ */
+ SCR_NO_OP,
+ 0,
+ SCR_RETURN,
+ 0,
+}/*-------------------------< MSG_IN >--------------------*/,{
+ /*
+ ** Get the first byte of the message.
+ **
+ ** The script processor doesn't negate the
+ ** ACK signal after this transfer.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[0]),
+}/*-------------------------< MSG_IN2 >--------------------*/,{
+ /*
+ ** Check first against 1 byte messages
+ ** that we handle from SCRIPTS.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)),
+ PADDR (complete),
+ SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)),
+ PADDR (disconnect),
+ SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)),
+ PADDR (save_dp),
+ SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)),
+ PADDR (restore_dp),
+ /*
+ ** We handle all other messages from the
+ ** C code, so no need to waste on-chip RAM
+ ** for those ones.
+ */
+ SCR_JUMP,
+ PADDRH (msg_in_etc),
+
+}/*-------------------------< STATUS >--------------------*/,{
+ /*
+ ** get the status
+ */
+ SCR_MOVE_ABS (1) ^ SCR_STATUS,
+ NADDR (scratch),
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** If STATUS is not GOOD, clear IMMEDIATE ARBITRATION,
+ ** since we may have to tamper the start queue from
+ ** the C code.
+ */
+ SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)),
+ 8,
+ SCR_REG_REG (scntl1, SCR_AND, ~IARB),
+ 0,
+#endif
+ /*
+ ** save status to scsi_status.
+ ** mark as complete.
+ */
+ SCR_TO_REG (SS_REG),
+ 0,
+ SCR_LOAD_REG (HS_REG, HS_COMPLETE),
+ 0,
+ /*
+ ** Anticipate the MESSAGE PHASE for
+ ** the TASK COMPLETE message.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR (msg_in),
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< COMPLETE >-----------------*/,{
+ /*
+ ** Complete message.
+ **
+ ** Copy the data pointer to LASTP in header.
+ */
+ SCR_STORE_REL (temp, 4),
+ offsetof (struct ccb, phys.header.lastp),
+ /*
+ ** When we terminate the cycle by clearing ACK,
+ ** the target may disconnect immediately.
+ **
+ ** We don't want to be told of an
+ ** "unexpected disconnect",
+ ** so we disable this feature.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ /*
+ ** Terminate cycle ...
+ */
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ ** ... and wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+}/*-------------------------< COMPLETE2 >-----------------*/,{
+ /*
+ ** Save host status to header.
+ */
+ SCR_STORE_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+
+#ifdef SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+ /*
+ ** Some bridges may reorder DMA writes to memory.
+ ** We donnot want the CPU to deal with completions
+ ** without all the posted write having been flushed
+ ** to memory. This DUMMY READ should flush posted
+ ** buffers prior to the CPU having to deal with
+ ** completions.
+ */
+ SCR_LOAD_REL (scr0, 4), /* DUMMY READ */
+ offsetof (struct ccb, phys.header.status),
+#endif
+ /*
+ ** If command resulted in not GOOD status,
+ ** call the C code if needed.
+ */
+ SCR_FROM_REG (SS_REG),
+ 0,
+ SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
+ PADDRH (bad_status),
+
+ /*
+ ** If we performed an auto-sense, call
+	** the C code to synchronize task aborts
+ ** with UNIT ATTENTION conditions.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ SCR_INT ^ IFTRUE (MASK (HF_AUTO_SENSE, HF_AUTO_SENSE)),
+ SIR_AUTO_SENSE_DONE,
+
+}/*------------------------< DONE >-----------------*/,{
+#ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR
+ /*
+ ** It seems that some bridges flush everything
+ ** when the INTR line is raised. For these ones,
+ ** we can just ensure that the INTR line will be
+ ** raised before each completion. So, if it happens
+ ** that we have been faster that the CPU, we just
+ ** have to synchronize with it. A dummy programmed
+ ** interrupt will do the trick.
+ ** Note that we overlap at most 1 IO with the CPU
+ ** in this situation and that the IRQ line must not
+ ** be shared.
+ */
+ SCR_FROM_REG (istat),
+ 0,
+ SCR_INT ^ IFTRUE (MASK (INTF, INTF)),
+ SIR_DUMMY_INTERRUPT,
+#endif
+ /*
+ ** Copy the DSA to the DONE QUEUE and
+ ** signal completion to the host.
+ ** If we are interrupted between DONE
+ ** and DONE_END, we must reset, otherwise
+ ** the completed CCB will be lost.
+ */
+ SCR_STORE_ABS (dsa, 4),
+ PADDRH (saved_dsa),
+ SCR_LOAD_ABS (dsa, 4),
+ PADDRH (done_pos),
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (saved_dsa),
+ SCR_STORE_REL (scratcha, 4),
+ 0,
+ /*
+ ** The instruction below reads the DONE QUEUE next
+ ** free position from memory.
+ ** In addition it ensures that all PCI posted writes
+ ** are flushed and so the DSA value of the done
+ ** CCB is visible by the CPU before INTFLY is raised.
+ */
+ SCR_LOAD_REL (temp, 4),
+ 4,
+ SCR_INT_FLY,
+ 0,
+ SCR_STORE_ABS (temp, 4),
+ PADDRH (done_pos),
+}/*------------------------< DONE_END >-----------------*/,{
+ SCR_JUMP,
+ PADDR (start),
+
+}/*-------------------------< SAVE_DP >------------------*/,{
+ /*
+ ** Clear ACK immediately.
+ ** No need to delay it.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** Keep track we received a SAVE DP, so
+ ** we will switch to the other PM context
+ ** on the next PM since the DP may point
+ ** to the current PM context.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
+ 0,
+ /*
+ ** SAVE_DP message:
+ ** Copy the data pointer to SAVEP in header.
+ */
+ SCR_STORE_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< RESTORE_DP >---------------*/,{
+ /*
+ ** RESTORE_DP message:
+ ** Copy SAVEP in header to actual data pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< DISCONNECT >---------------*/,{
+ /*
+ ** DISCONNECTing ...
+ **
+ ** disable the "unexpected disconnect" feature,
+ ** and remove the ACK signal.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ ** Wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ ** Status is: DISCONNECTED.
+ */
+ SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
+ 0,
+ /*
+ ** Save host status to header.
+ */
+ SCR_STORE_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+ /*
+ ** If QUIRK_AUTOSAVE is set,
+ ** do an "save pointer" operation.
+ */
+ SCR_FROM_REG (QU_REG),
+ 0,
+ SCR_JUMP ^ IFFALSE (MASK (QUIRK_AUTOSAVE, QUIRK_AUTOSAVE)),
+ PADDR (start),
+ /*
+ ** like SAVE_DP message:
+ ** Remember we saved the data pointer.
+ ** Copy data pointer to SAVEP in header.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
+ 0,
+ SCR_STORE_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ SCR_JUMP,
+ PADDR (start),
+
+}/*-------------------------< IDLE >------------------------*/,{
+ /*
+ ** Nothing to do?
+ ** Wait for reselect.
+ ** This NOP will be patched with LED OFF
+ ** SCR_REG_REG (gpreg, SCR_OR, 0x01)
+ */
+ SCR_NO_OP,
+ 0,
+#ifdef SCSI_NCR_IARB_SUPPORT
+ SCR_JUMPR,
+ 8,
+#endif
+}/*-------------------------< UNGETJOB >-----------------*/,{
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** Set IMMEDIATE ARBITRATION, for the next time.
+ ** This will give us better chance to win arbitration
+ ** for the job we just wanted to do.
+ */
+ SCR_REG_REG (scntl1, SCR_OR, IARB),
+ 0,
+#endif
+ /*
+ ** We are not able to restart the SCRIPTS if we are
+ ** interrupted and these instruction haven't been
+ ** all executed. BTW, this is very unlikely to
+ ** happen, but we check that from the C code.
+ */
+ SCR_LOAD_REG (dsa, 0xff),
+ 0,
+ SCR_STORE_ABS (scratcha, 4),
+ PADDRH (startpos),
+}/*-------------------------< RESELECT >--------------------*/,{
+ /*
+ ** make the host status invalid.
+ */
+ SCR_CLR (SCR_TRG),
+ 0,
+ /*
+ ** Sleep waiting for a reselection.
+ ** If SIGP is set, special treatment.
+ **
+ ** Zu allem bereit ..
+ */
+ SCR_WAIT_RESEL,
+ PADDR(start),
+}/*-------------------------< RESELECTED >------------------*/,{
+ /*
+ ** This NOP will be patched with LED ON
+ ** SCR_REG_REG (gpreg, SCR_AND, 0xfe)
+ */
+ SCR_NO_OP,
+ 0,
+ /*
+ ** load the target id into the sdid
+ */
+ SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
+ 0,
+ SCR_TO_REG (sdid),
+ 0,
+ /*
+ ** load the target control block address
+ */
+ SCR_LOAD_ABS (dsa, 4),
+ PADDRH (targtbl),
+ SCR_SFBR_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_AND, 0x3c),
+ 0,
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+ /*
+ ** Load the synchronous transfer registers.
+ */
+ SCR_LOAD_REL (scntl3, 1),
+ offsetof(struct tcb, wval),
+ SCR_LOAD_REL (sxfer, 1),
+ offsetof(struct tcb, sval),
+}/*-------------------------< RESEL_SCNTL4 >------------------*/,{
+ /*
+ ** Write with uval value. Patch if device
+ ** does not support Ultra3.
+ **
+ ** SCR_LOAD_REL (scntl4, 1),
+ ** offsetof(struct tcb, uval),
+ */
+
+ SCR_NO_OP,
+ 0,
+ /*
+ * We expect MESSAGE IN phase.
+ * If not, get help from the C code.
+ */
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ SIR_RESEL_NO_MSG_IN,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin),
+
+ /*
+ * If IDENTIFY LUN #0, use a faster path
+ * to find the LCB structure.
+ */
+ SCR_JUMPR ^ IFTRUE (MASK (0x80, 0xbf)),
+ 56,
+ /*
+ * If message isn't an IDENTIFY,
+ * tell the C code about.
+ */
+ SCR_INT ^ IFFALSE (MASK (0x80, 0x80)),
+ SIR_RESEL_NO_IDENTIFY,
+ /*
+ * It is an IDENTIFY message,
+ * Load the LUN control block address.
+ */
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct tcb, b_luntbl),
+ SCR_SFBR_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_AND, 0xfc),
+ 0,
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+ SCR_JUMPR,
+ 8,
+ /*
+ ** LUN 0 special case (but usual one :))
+ */
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct tcb, b_lun0),
+
+ /*
+ ** Load the reselect task action for this LUN.
+ ** Load the tasks DSA array for this LUN.
+ ** Call the action.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct lcb, resel_task),
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct lcb, b_tasktbl),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< RESEL_TAG >-------------------*/,{
+ /*
+ ** ACK the IDENTIFY or TAG previously received
+ */
+
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** Read IDENTIFY + SIMPLE + TAG using a single MOVE.
+	** Aggressive optimization, isn't it?
+ ** No need to test the SIMPLE TAG message, since the
+ ** driver only supports conformant devices for tags. ;-)
+ */
+ SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+ NADDR (msgin),
+ /*
+ ** Read the TAG from the SIDL.
+ ** Still an aggressive optimization. ;-)
+ ** Compute the CCB indirect jump address which
+ ** is (#TAG*2 & 0xfc) due to tag numbering using
+ ** 1,3,5..MAXTAGS*2+1 actual values.
+ */
+ SCR_REG_SFBR (sidl, SCR_SHL, 0),
+ 0,
+#if MAX_TASKS*4 > 512
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 2),
+ 0,
+ SCR_REG_REG (sfbr, SCR_SHL, 0),
+ 0,
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 1),
+ 0,
+#elif MAX_TASKS*4 > 256
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 1),
+ 0,
+#endif
+ /*
+ ** Retrieve the DSA of this task.
+ ** JUMP indirectly to the restart point of the CCB.
+ */
+ SCR_SFBR_REG (dsa, SCR_AND, 0xfc),
+ 0,
+}/*-------------------------< RESEL_GO >-------------------*/,{
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct ccb, phys.header.go.restart),
+ SCR_RETURN,
+ 0,
+ /* In normal situations we branch to RESEL_DSA */
+}/*-------------------------< RESEL_NOTAG >-------------------*/,{
+ /*
+ ** JUMP indirectly to the restart point of the CCB.
+ */
+ SCR_JUMP,
+ PADDR (resel_go),
+
+}/*-------------------------< RESEL_DSA >-------------------*/,{
+ /*
+ ** Ack the IDENTIFY or TAG previously received.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** load the savep (saved pointer) into
+ ** the actual data pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_LOAD_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+ /*
+ ** Jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< DATA_IN >--------------------*/,{
+/*
+** Because the size depends on the
+** #define MAX_SCATTER parameter,
+** it is filled in at runtime.
+**
+** ##===========< i=0; i<MAX_SCATTER >=========
+** || SCR_CHMOV_TBL ^ SCR_DATA_IN,
+** || offsetof (struct dsb, data[ i]),
+** ##==========================================
+**
+**---------------------------------------------------------
+*/
+0
+}/*-------------------------< DATA_IN2 >-------------------*/,{
+ SCR_CALL,
+ PADDR (datai_done),
+ SCR_JUMP,
+ PADDRH (data_ovrun),
+}/*-------------------------< DATA_OUT >--------------------*/,{
+/*
+** Because the size depends on the
+** #define MAX_SCATTER parameter,
+** it is filled in at runtime.
+**
+** ##===========< i=0; i<MAX_SCATTER >=========
+** || SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+** || offsetof (struct dsb, data[ i]),
+** ##==========================================
+**
+**---------------------------------------------------------
+*/
+0
+}/*-------------------------< DATA_OUT2 >-------------------*/,{
+ SCR_CALL,
+ PADDR (datao_done),
+ SCR_JUMP,
+ PADDRH (data_ovrun),
+
+}/*-------------------------< PM0_DATA >--------------------*/,{
+ /*
+ ** Read our host flags to SFBR, so we will be able
+ ** to check against the data direction we expect.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ ** Check against actual DATA PHASE.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ PADDR (pm0_data_out),
+ /*
+ ** Actual phase is DATA IN.
+ ** Check against expected direction.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+ ** Keep track we are moving data from the
+ ** PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+ 0,
+ /*
+ ** Move the data to memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct ccb, phys.pm0.sg),
+ SCR_JUMP,
+ PADDR (pm0_data_end),
+}/*-------------------------< PM0_DATA_OUT >----------------*/,{
+ /*
+ ** Actual phase is DATA OUT.
+ ** Check against expected direction.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+ ** Keep track we are moving data from the
+ ** PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+ 0,
+ /*
+ ** Move the data from memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ offsetof (struct ccb, phys.pm0.sg),
+}/*-------------------------< PM0_DATA_END >----------------*/,{
+ /*
+ ** Clear the flag that told we were moving
+ ** data from the PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)),
+ 0,
+ /*
+ ** Return to the previous DATA script which
+ ** is guaranteed by design (if no bug) to be
+ ** the main DATA script for this transfer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.pm0.ret),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< PM1_DATA >--------------------*/,{
+ /*
+ ** Read our host flags to SFBR, so we will be able
+ ** to check against the data direction we expect.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ ** Check against actual DATA PHASE.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ PADDR (pm1_data_out),
+ /*
+ ** Actual phase is DATA IN.
+ ** Check against expected direction.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+ ** Keep track we are moving data from the
+ ** PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+ 0,
+ /*
+ ** Move the data to memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct ccb, phys.pm1.sg),
+ SCR_JUMP,
+ PADDR (pm1_data_end),
+}/*-------------------------< PM1_DATA_OUT >----------------*/,{
+ /*
+ ** Actual phase is DATA OUT.
+ ** Check against expected direction.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+ ** Keep track we are moving data from the
+ ** PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+ 0,
+ /*
+ ** Move the data from memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ offsetof (struct ccb, phys.pm1.sg),
+}/*-------------------------< PM1_DATA_END >----------------*/,{
+ /*
+ ** Clear the flag that told we were moving
+ ** data from the PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)),
+ 0,
+ /*
+ ** Return to the previous DATA script which
+ ** is guaranteed by design (if no bug) to be
+ ** the main DATA script for this transfer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.pm1.ret),
+ SCR_RETURN,
+ 0,
+}/*---------------------------------------------------------*/
+};
+
+
+static struct scripth scripth0 __initdata = {
+/*------------------------< START64 >-----------------------*/{
+ /*
+ ** SCRIPT entry point for the 895A and the 896.
+ ** For now, there is no specific stuff for that
+ ** chip at this point, but this may come.
+ */
+ SCR_JUMP,
+ PADDR (init),
+}/*-------------------------< NO_DATA >-------------------*/,{
+ SCR_JUMP,
+ PADDRH (data_ovrun),
+}/*-----------------------< SEL_FOR_ABORT >------------------*/,{
+ /*
+ ** We are jumped here by the C code, if we have
+ ** some target to reset or some disconnected
+ ** job to abort. Since error recovery is a serious
+	** business, we will really reset the SCSI BUS in
+	** case of a SCSI interrupt occurring in this path.
+ */
+
+ /*
+ ** Set initiator mode.
+ */
+ SCR_CLR (SCR_TRG),
+ 0,
+ /*
+ ** And try to select this target.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct ncb, abrt_sel),
+ PADDR (reselect),
+
+ /*
+ ** Wait for the selection to complete or
+ ** the selection to time out.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ -8,
+ /*
+ ** Call the C code.
+ */
+ SCR_INT,
+ SIR_TARGET_SELECTED,
+ /*
+ ** The C code should let us continue here.
+ ** Send the 'kiss of death' message.
+ ** We expect an immediate disconnect once
+ ** the target has eaten the message.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct ncb, abrt_tbl),
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ ** Tell the C code that we are done.
+ */
+ SCR_INT,
+ SIR_ABORT_SENT,
+}/*-----------------------< SEL_FOR_ABORT_1 >--------------*/,{
+ /*
+ ** Jump at scheduler.
+ */
+ SCR_JUMP,
+ PADDR (start),
+
+}/*------------------------< SELECT_NO_ATN >-----------------*/,{
+ /*
+ ** Set Initiator mode.
+ ** And try to select this target without ATN.
+ */
+
+ SCR_CLR (SCR_TRG),
+ 0,
+ SCR_SEL_TBL ^ offsetof (struct dsb, select),
+ PADDR (ungetjob),
+ /*
+ ** load the savep (saved pointer) into
+ ** the actual data pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_LOAD_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+
+}/*------------------------< WF_SEL_DONE_NO_ATN >-----------------*/,{
+ /*
+ ** Wait immediately for the next phase or
+ ** the selection to complete or time-out.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ 0,
+ SCR_JUMP,
+ PADDR (select2),
+
+}/*-------------------------< MSG_IN_ETC >--------------------*/,{
+ /*
+ ** If it is an EXTENDED (variable size message)
+ ** Handle it.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
+ PADDRH (msg_extended),
+ /*
+ ** Let the C code handle any other
+ ** 1 byte message.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0x00, 0xf0)),
+ PADDRH (msg_received),
+ SCR_JUMP ^ IFTRUE (MASK (0x10, 0xf0)),
+ PADDRH (msg_received),
+ /*
+	** We do not handle 2-byte messages from SCRIPTS.
+ ** So, let the C code deal with these ones too.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (0x20, 0xf0)),
+ PADDRH (msg_weird_seen),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ SCR_JUMP,
+ PADDRH (msg_received),
+
+}/*-------------------------< MSG_RECEIVED >--------------------*/,{
+ SCR_LOAD_REL (scratcha, 4), /* DUMMY READ */
+ 0,
+ SCR_INT,
+ SIR_MSG_RECEIVED,
+
+}/*-------------------------< MSG_WEIRD_SEEN >------------------*/,{
+ SCR_LOAD_REL (scratcha1, 4), /* DUMMY READ */
+ 0,
+ SCR_INT,
+ SIR_MSG_WEIRD,
+
+}/*-------------------------< MSG_EXTENDED >--------------------*/,{
+ /*
+ ** Clear ACK and get the next byte
+ ** assumed to be the message length.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ /*
+ ** Try to catch some unlikely situations as 0 length
+ ** or too large the length.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (0)),
+ PADDRH (msg_weird_seen),
+ SCR_TO_REG (scratcha),
+ 0,
+ SCR_REG_REG (sfbr, SCR_ADD, (256-8)),
+ 0,
+ SCR_JUMP ^ IFTRUE (CARRYSET),
+ PADDRH (msg_weird_seen),
+ /*
+	** We do not handle extended messages from SCRIPTS.
+	** Read the amount of data corresponding to the
+ ** message length and call the C code.
+ */
+ SCR_STORE_REL (scratcha, 1),
+ offsetof (struct dsb, smsg_ext.size),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_TBL ^ SCR_MSG_IN,
+ offsetof (struct dsb, smsg_ext),
+ SCR_JUMP,
+ PADDRH (msg_received),
+
+}/*-------------------------< MSG_BAD >------------------*/,{
+ /*
+ ** unimplemented message - reject it.
+ */
+ SCR_INT,
+ SIR_REJECT_TO_SEND,
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< MSG_WEIRD >--------------------*/,{
+ /*
+ ** weird message received
+ ** ignore all MSG IN phases and reject it.
+ */
+ SCR_INT,
+ SIR_REJECT_TO_SEND,
+ SCR_SET (SCR_ATN),
+ 0,
+}/*-------------------------< MSG_WEIRD1 >--------------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (scratch),
+ SCR_JUMP,
+ PADDRH (msg_weird1),
+}/*-------------------------< WDTR_RESP >----------------*/,{
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDRH (nego_bad_phase),
+
+}/*-------------------------< SEND_WDTR >----------------*/,{
+ /*
+ ** Send the M_X_WIDE_REQ
+ */
+ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_JUMP,
+ PADDRH (msg_out_done),
+
+}/*-------------------------< SDTR_RESP >-------------*/,{
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDRH (nego_bad_phase),
+
+}/*-------------------------< SEND_SDTR >-------------*/,{
+ /*
+ ** Send the M_X_SYNC_REQ
+ */
+ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_JUMP,
+ PADDRH (msg_out_done),
+
+}/*-------------------------< PPR_RESP >-------------*/,{
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDRH (nego_bad_phase),
+
+}/*-------------------------< SEND_PPR >-------------*/,{
+ /*
+ ** Send the M_X_PPR_REQ
+ */
+ SCR_MOVE_ABS (8) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_JUMP,
+ PADDRH (msg_out_done),
+
+}/*-------------------------< NEGO_BAD_PHASE >------------*/,{
+ SCR_INT,
+ SIR_NEGO_PROTO,
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< MSG_OUT >-------------------*/,{
+ /*
+ ** The target requests a message.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ /*
+ ** ... wait for the next phase
+ ** if it's a message out, send it again, ...
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDRH (msg_out),
+}/*-------------------------< MSG_OUT_DONE >--------------*/,{
+ /*
+ ** ... else clear the message ...
+ */
+ SCR_INT,
+ SIR_MSG_OUT_DONE,
+ /*
+ ** ... and process the next phase
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< DATA_OVRUN >-----------------------*/,{
+ /*
+ * Use scratcha to count the extra bytes.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (zero),
+}/*-------------------------< DATA_OVRUN1 >----------------------*/,{
+ /*
+ * The target may want to transfer too much data.
+ *
+ * If phase is DATA OUT write 1 byte and count it.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+ 16,
+ SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT,
+ NADDR (scratch),
+ SCR_JUMP,
+ PADDRH (data_ovrun2),
+ /*
+ * If WSR is set, clear this condition, and
+ * count this byte.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
+ 16,
+ SCR_REG_REG (scntl2, SCR_OR, WSR),
+ 0,
+ SCR_JUMP,
+ PADDRH (data_ovrun2),
+ /*
+ * Finally check against DATA IN phase.
+ * Signal data overrun to the C code
+ * and jump to dispatcher if not so.
+ * Read 1 byte otherwise and count it.
+ */
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_IN)),
+ 16,
+ SCR_INT,
+ SIR_DATA_OVERRUN,
+ SCR_JUMP,
+ PADDR (dispatch),
+ SCR_CHMOV_ABS (1) ^ SCR_DATA_IN,
+ NADDR (scratch),
+}/*-------------------------< DATA_OVRUN2 >----------------------*/,{
+ /*
+ * Count this byte.
+ * This will allow to return a negative
+ * residual to user.
+ */
+ SCR_REG_REG (scratcha, SCR_ADD, 0x01),
+ 0,
+ SCR_REG_REG (scratcha1, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (scratcha2, SCR_ADDC, 0),
+ 0,
+ /*
+ * .. and repeat as required.
+ */
+ SCR_JUMP,
+ PADDRH (data_ovrun1),
+
+}/*-------------------------< ABORT_RESEL >----------------*/,{
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** send the abort/abortag/reset message
+ ** we expect an immediate disconnect
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ SCR_INT,
+ SIR_RESEL_ABORTED,
+ SCR_JUMP,
+ PADDR (start),
+}/*-------------------------< RESEND_IDENT >-------------------*/,{
+ /*
+ ** The target stays in MSG OUT phase after having acked
+ ** Identify [+ Tag [+ Extended message ]]. Targets shall
+ ** behave this way on parity error.
+ ** We must send it again all the messages.
+ */
+ SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the */
+ 0, /* 1rst ACK = 90 ns. Hope the NCR is'nt too fast */
+ SCR_JUMP,
+ PADDR (send_ident),
+}/*-------------------------< IDENT_BREAK >-------------------*/,{
+ SCR_CLR (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR (select2),
+}/*-------------------------< IDENT_BREAK_ATN >----------------*/,{
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR (select2),
+}/*-------------------------< SDATA_IN >-------------------*/,{
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct dsb, sense),
+ SCR_CALL,
+ PADDR (datai_done),
+ SCR_JUMP,
+ PADDRH (data_ovrun),
+}/*-------------------------< DATA_IO >--------------------*/,{
+ /*
+ ** We jump here if the data direction was unknown at the
+ ** time we had to queue the command to the scripts processor.
+ ** Pointers had been set as follow in this situation:
+ ** savep --> DATA_IO
+ ** lastp --> start pointer when DATA_IN
+ ** goalp --> goal pointer when DATA_IN
+ ** wlastp --> start pointer when DATA_OUT
+ ** wgoalp --> goal pointer when DATA_OUT
+ ** This script sets savep/lastp/goalp according to the
+ ** direction chosen by the target.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+ PADDRH(data_io_out),
+}/*-------------------------< DATA_IO_COM >-----------------*/,{
+ /*
+ ** Direction is DATA IN.
+ ** Warning: we jump here, even when phase is DATA OUT.
+ */
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.lastp),
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.savep),
+
+ /*
+ ** Jump to the SCRIPTS according to actual direction.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< DATA_IO_OUT >-----------------*/,{
+ /*
+ ** Direction is DATA OUT.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_DATA_IN)),
+ 0,
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.wlastp),
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.lastp),
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.wgoalp),
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.goalp),
+ SCR_JUMP,
+ PADDRH(data_io_com),
+
+}/*-------------------------< RESEL_BAD_LUN >---------------*/,{
+ /*
+ ** Message is an IDENTIFY, but lun is unknown.
+ ** Signal problem to C code for logging the event.
+ ** Send a M_ABORT to clear all pending tasks.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_LUN,
+ SCR_JUMP,
+ PADDRH (abort_resel),
+}/*-------------------------< BAD_I_T_L >------------------*/,{
+ /*
+	**	We do not have a task for that I_T_L.
+ ** Signal problem to C code for logging the event.
+ ** Send a M_ABORT message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_I_T_L,
+ SCR_JUMP,
+ PADDRH (abort_resel),
+}/*-------------------------< BAD_I_T_L_Q >----------------*/,{
+ /*
+	**	We do not have a task that matches the tag.
+ ** Signal problem to C code for logging the event.
+ ** Send a M_ABORTTAG message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_I_T_L_Q,
+ SCR_JUMP,
+ PADDRH (abort_resel),
+}/*-------------------------< BAD_STATUS >-----------------*/,{
+ /*
+ ** Anything different from INTERMEDIATE
+ ** CONDITION MET should be a bad SCSI status,
+ ** given that GOOD status has already been tested.
+ ** Call the C code.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (startpos),
+ SCR_INT ^ IFFALSE (DATA (S_COND_MET)),
+ SIR_BAD_STATUS,
+ SCR_RETURN,
+ 0,
+
+}/*-------------------------< TWEAK_PMJ >------------------*/,{
+ /*
+ ** Disable PM handling from SCRIPTS for the data phase
+ ** and so force PM to be handled from C code if HF_PM_TO_C
+ ** flag is set.
+ */
+ SCR_FROM_REG(HF_REG),
+ 0,
+ SCR_JUMPR ^ IFTRUE (MASK (HF_PM_TO_C, HF_PM_TO_C)),
+ 16,
+ SCR_REG_REG (ccntl0, SCR_OR, ENPMJ),
+ 0,
+ SCR_RETURN,
+ 0,
+ SCR_REG_REG (ccntl0, SCR_AND, (~ENPMJ)),
+ 0,
+ SCR_RETURN,
+ 0,
+
+}/*-------------------------< PM_HANDLE >------------------*/,{
+ /*
+ ** Phase mismatch handling.
+ **
+ ** Since we have to deal with 2 SCSI data pointers
+ ** (current and saved), we need at least 2 contexts.
+ ** Each context (pm0 and pm1) has a saved area, a
+ ** SAVE mini-script and a DATA phase mini-script.
+ */
+ /*
+ ** Get the PM handling flags.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ ** If no flags (1rst PM for example), avoid
+ ** all the below heavy flags testing.
+ ** This makes the normal case a bit faster.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED))),
+ PADDRH (pm_handle1),
+ /*
+ ** If we received a SAVE DP, switch to the
+ ** other PM context since the savep may point
+ ** to the current PM context.
+ */
+ SCR_JUMPR ^ IFFALSE (MASK (HF_DP_SAVED, HF_DP_SAVED)),
+ 8,
+ SCR_REG_REG (sfbr, SCR_XOR, HF_ACT_PM),
+ 0,
+ /*
+ ** If we have been interrupt in a PM DATA mini-script,
+ ** we take the return address from the corresponding
+ ** saved area.
+ ** This ensure the return address always points to the
+ ** main DATA script for this transfer.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1))),
+ PADDRH (pm_handle1),
+ SCR_JUMPR ^ IFFALSE (MASK (HF_IN_PM0, HF_IN_PM0)),
+ 16,
+ SCR_LOAD_REL (ia, 4),
+ offsetof(struct ccb, phys.pm0.ret),
+ SCR_JUMP,
+ PADDRH (pm_save),
+ SCR_LOAD_REL (ia, 4),
+ offsetof(struct ccb, phys.pm1.ret),
+ SCR_JUMP,
+ PADDRH (pm_save),
+}/*-------------------------< PM_HANDLE1 >-----------------*/,{
+ /*
+ ** Normal case.
+ ** Update the return address so that it
+ ** will point after the interrupted MOVE.
+ */
+ SCR_REG_REG (ia, SCR_ADD, 8),
+ 0,
+ SCR_REG_REG (ia1, SCR_ADDC, 0),
+ 0,
+}/*-------------------------< PM_SAVE >--------------------*/,{
+ /*
+ ** Clear all the flags that told us if we were
+ ** interrupted in a PM DATA mini-script and/or
+ ** we received a SAVE DP.
+ */
+ SCR_SFBR_REG (HF_REG, SCR_AND, (~(HF_IN_PM0|HF_IN_PM1|HF_DP_SAVED))),
+ 0,
+ /*
+ ** Choose the current PM context.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_ACT_PM, HF_ACT_PM)),
+ PADDRH (pm1_save),
+}/*-------------------------< PM0_SAVE >-------------------*/,{
+ SCR_STORE_REL (ia, 4),
+ offsetof(struct ccb, phys.pm0.ret),
+ /*
+ ** If WSR bit is set, either UA and RBC may
+ ** have to be changed whatever the device wants
+	**	to ignore this residue or not.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
+ PADDRH (pm_wsr_handle),
+ /*
+ ** Save the remaining byte count, the updated
+ ** address and the return address.
+ */
+ SCR_STORE_REL (rbc, 4),
+ offsetof(struct ccb, phys.pm0.sg.size),
+ SCR_STORE_REL (ua, 4),
+ offsetof(struct ccb, phys.pm0.sg.addr),
+ /*
+ ** Set the current pointer at the PM0 DATA mini-script.
+ */
+ SCR_LOAD_ABS (temp, 4),
+ PADDRH (pm0_data_addr),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< PM1_SAVE >-------------------*/,{
+ SCR_STORE_REL (ia, 4),
+ offsetof(struct ccb, phys.pm1.ret),
+ /*
+ ** If WSR bit is set, either UA and RBC may
+ ** have been changed whatever the device wants
+ ** to ignore this residue or not.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
+ PADDRH (pm_wsr_handle),
+ /*
+ ** Save the remaining byte count, the updated
+ ** address and the return address.
+ */
+ SCR_STORE_REL (rbc, 4),
+ offsetof(struct ccb, phys.pm1.sg.size),
+ SCR_STORE_REL (ua, 4),
+ offsetof(struct ccb, phys.pm1.sg.addr),
+ /*
+ ** Set the current pointer at the PM1 DATA mini-script.
+ */
+ SCR_LOAD_ABS (temp, 4),
+ PADDRH (pm1_data_addr),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*--------------------------< PM_WSR_HANDLE >-----------------------*/,{
+ /*
+ * Phase mismatch handling from SCRIPT with WSR set.
+ * Such a condition can occur if the chip wants to
+ * execute a CHMOV(size > 1) when the WSR bit is
+ * set and the target changes PHASE.
+ */
+#ifdef SYM_DEBUG_PM_WITH_WSR
+ /*
+ * Some debugging may still be needed.:)
+ */
+ SCR_INT,
+ SIR_PM_WITH_WSR,
+#endif
+ /*
+ * We must move the residual byte to memory.
+ *
+ * UA contains bit 0..31 of the address to
+ * move the residual byte.
+ * Move it to the table indirect.
+ */
+ SCR_STORE_REL (ua, 4),
+ offsetof (struct ccb, phys.wresid.addr),
+ /*
+ * Increment UA (move address to next position).
+ */
+ SCR_REG_REG (ua, SCR_ADD, 1),
+ 0,
+ SCR_REG_REG (ua1, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (ua2, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (ua3, SCR_ADDC, 0),
+ 0,
+ /*
+ * Compute SCRATCHA as:
+ * - size to transfer = 1 byte.
+ * - bit 24..31 = high address bit [32...39].
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (zero),
+ SCR_REG_REG (scratcha, SCR_OR, 1),
+ 0,
+ SCR_FROM_REG (rbc3),
+ 0,
+ SCR_TO_REG (scratcha3),
+ 0,
+ /*
+ * Move this value to the table indirect.
+ */
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct ccb, phys.wresid.size),
+ /*
+ * Wait for a valid phase.
+ * While testing with bogus QUANTUM drives, the C1010
+ * sometimes raised a spurious phase mismatch with
+ * WSR and the CHMOV(1) triggered another PM.
+	 * Waiting explicitly for the PHASE seemed to avoid
+ * the nested phase mismatch. Btw, this didn't happen
+ * using my IBM drives.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ 0,
+ /*
+ * Perform the move of the residual byte.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct ccb, phys.wresid),
+ /*
+ * We can now handle the phase mismatch with UA fixed.
+ * RBC[0..23]=0 is a special case that does not require
+ * a PM context. The C code also checks against this.
+ */
+ SCR_FROM_REG (rbc),
+ 0,
+ SCR_RETURN ^ IFFALSE (DATA (0)),
+ 0,
+ SCR_FROM_REG (rbc1),
+ 0,
+ SCR_RETURN ^ IFFALSE (DATA (0)),
+ 0,
+ SCR_FROM_REG (rbc2),
+ 0,
+ SCR_RETURN ^ IFFALSE (DATA (0)),
+ 0,
+ /*
+ * RBC[0..23]=0.
+	 * Not only do we not need a PM context, but this would
+ * lead to a bogus CHMOV(0). This condition means that
+ * the residual was the last byte to move from this CHMOV.
+ * So, we just have to move the current data script pointer
+ * (i.e. TEMP) to the SCRIPTS address following the
+ * interrupted CHMOV and jump to dispatcher.
+ */
+ SCR_STORE_ABS (ia, 4),
+ PADDRH (scratch),
+ SCR_LOAD_ABS (temp, 4),
+ PADDRH (scratch),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*--------------------------< WSR_MA_HELPER >-----------------------*/,{
+ /*
+ * Helper for the C code when WSR bit is set.
+ * Perform the move of the residual byte.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct ccb, phys.wresid),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< ZERO >------------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SCRATCH >---------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SCRATCH1 >--------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< PM0_DATA_ADDR >---------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< PM1_DATA_ADDR >---------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SAVED_DSA >-------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SAVED_DRS >-------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< DONE_POS >--------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< STARTPOS >--------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< TARGTBL >---------------------*/,{
+ SCR_DATA_ZERO,
+
+
+/*
+** We may use MEMORY MOVE instructions to load the on chip-RAM,
+** if it happens that mapping PCI memory is not possible.
+** But writing the RAM from the CPU is the preferred method,
+** since PCI 2.2 seems to disallow PCI self-mastering.
+*/
+
+#ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+
+}/*-------------------------< START_RAM >-------------------*/,{
+ /*
+ ** Load the script into on-chip RAM,
+ ** and jump to start point.
+ */
+ SCR_COPY (sizeof (struct script)),
+}/*-------------------------< SCRIPT0_BA >--------------------*/,{
+ 0,
+ PADDR (start),
+ SCR_JUMP,
+ PADDR (init),
+
+}/*-------------------------< START_RAM64 >--------------------*/,{
+ /*
+ ** Load the RAM and start for 64 bit PCI (895A,896).
+ ** Both scripts (script and scripth) are loaded into
+ ** the RAM which is 8K (4K for 825A/875/895).
+ ** We also need to load some 32-63 bit segments
+ ** address of the SCRIPTS processor.
+ ** LOAD/STORE ABSOLUTE always refers to on-chip RAM
+ ** in our implementation. The main memory is
+ ** accessed using LOAD/STORE DSA RELATIVE.
+ */
+ SCR_LOAD_REL (mmws, 4),
+ offsetof (struct ncb, scr_ram_seg),
+ SCR_COPY (sizeof(struct script)),
+}/*-------------------------< SCRIPT0_BA64 >--------------------*/,{
+ 0,
+ PADDR (start),
+ SCR_COPY (sizeof(struct scripth)),
+}/*-------------------------< SCRIPTH0_BA64 >--------------------*/,{
+ 0,
+ PADDRH (start64),
+ SCR_LOAD_REL (mmrs, 4),
+ offsetof (struct ncb, scr_ram_seg),
+ SCR_JUMP64,
+ PADDRH (start64),
+}/*-------------------------< RAM_SEG64 >--------------------*/,{
+ 0,
+
+#endif /* SCSI_NCR_PCI_MEM_NOT_SUPPORTED */
+
+}/*-------------------------< SNOOPTEST >-------------------*/,{
+ /*
+ ** Read the variable.
+ */
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof(struct ncb, ncr_cache),
+ SCR_STORE_REL (temp, 4),
+ offsetof(struct ncb, ncr_cache),
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct ncb, ncr_cache),
+}/*-------------------------< SNOOPEND >-------------------*/,{
+ /*
+ ** And stop.
+ */
+ SCR_INT,
+ 99,
+}/*--------------------------------------------------------*/
+};
+
+/*==========================================================
+**
+**
+** Fill in #define dependent parts of the script
+**
+**
+**==========================================================
+*/
+
+/*
+**	Generate the scatter/gather data areas of the script:
+**	one CHMOV table-indirect instruction (2 words) per segment,
+**	MAX_SCATTER entries each, for both transfer directions.
+**	The 'scrh' argument is not referenced by this body.
+*/
+void __init ncr_script_fill (struct script * scr, struct scripth * scrh)
+{
+	int	i;
+	ncrcmd	*p;
+
+	/* DATA IN area: one move per dsb data-table entry. */
+	p = scr->data_in;
+	for (i=0; i<MAX_SCATTER; i++) {
+		*p++ =SCR_CHMOV_TBL ^ SCR_DATA_IN;
+		*p++ =offsetof (struct dsb, data[i]);
+	};
+
+	/* The generated code must exactly fill the data_in area. */
+	assert ((u_long)p == (u_long)&scr->data_in + sizeof (scr->data_in));
+
+	/* DATA OUT area: same layout, opposite direction. */
+	p = scr->data_out;
+
+	for (i=0; i<MAX_SCATTER; i++) {
+		*p++ =SCR_CHMOV_TBL ^ SCR_DATA_OUT;
+		*p++ =offsetof (struct dsb, data[i]);
+	};
+
+	assert ((u_long)p == (u_long)&scr->data_out + sizeof (scr->data_out));
+}
+
+/*==========================================================
+**
+**
+** Copy and rebind a script.
+**
+**
+**==========================================================
+*/
+
+/*
+**	Copy 'len' bytes of SCRIPTS code from 'src' to 'dst',
+**	converting each 32-bit word to SCRIPTS-processor byte order
+**	(cpu_to_scr) and patching relocation tokens (RELOC_*) into
+**	the corresponding bus addresses on the fly.
+*/
+static void __init
+ncr_script_copy_and_bind (ncb_p np,ncrcmd *src,ncrcmd *dst,int len)
+{
+	ncrcmd  opcode, new, old, tmp1, tmp2;
+	ncrcmd	*start, *end;
+	int relocs;
+	/* NOTE(review): 'opchanged' is incremented but never read here. */
+	int opchanged = 0;
+
+	start = src;
+	end = src + len/4;	/* len is in bytes, words are 4 bytes */
+
+	while (src < end) {
+
+		opcode = *src++;
+		*dst++ = cpu_to_scr(opcode);
+
+		/*
+		**	If we forget to change the length
+		**	in struct script, a field will be
+		**	padded with 0. This is an illegal
+		**	command.
+		*/
+
+		if (opcode == 0) {
+			printk (KERN_INFO "%s: ERROR0 IN SCRIPT at %d.\n",
+				ncr_name(np), (int) (src-start-1));
+			MDELAY (10000);
+			continue;
+		};
+
+		/*
+		**	We use the bogus value 0xf00ff00f ;-)
+		**	to reserve data area in SCRIPTS.
+		*/
+		if (opcode == SCR_DATA_ZERO) {
+			dst[-1] = 0;
+			continue;
+		}
+
+		if (DEBUG_FLAGS & DEBUG_SCRIPT)
+			printk (KERN_INFO "%p: <%x>\n",
+				(src-1), (unsigned)opcode);
+
+		/*
+		**	We don't have to decode ALL commands
+		**	(dispatch on the 4 high opcode bits only).
+		*/
+		switch (opcode >> 28) {
+
+		case 0xf:
+			/*
+			**	LOAD / STORE DSA relative, don't relocate.
+			*/
+			relocs = 0;
+			break;
+		case 0xe:
+			/*
+			**	LOAD / STORE absolute.
+			*/
+			relocs = 1;
+			break;
+		case 0xc:
+			/*
+			**	COPY has TWO arguments.
+			*/
+			relocs = 2;
+			tmp1 = src[0];
+			tmp2 = src[1];
+#ifdef RELOC_KVAR
+			if ((tmp1 & RELOC_MASK) == RELOC_KVAR)
+				tmp1 = 0;
+			if ((tmp2 & RELOC_MASK) == RELOC_KVAR)
+				tmp2 = 0;
+#endif
+			/* Flag COPY arguments whose low 2 address bits differ. */
+			if ((tmp1 ^ tmp2) & 3) {
+				printk (KERN_ERR"%s: ERROR1 IN SCRIPT at %d.\n",
+					ncr_name(np), (int) (src-start-1));
+				MDELAY (1000);
+			}
+			/*
+			**	If PREFETCH feature not enabled, remove
+			**	the NO FLUSH bit if present.
+			*/
+			if ((opcode & SCR_NO_FLUSH) &&
+			    !(np->features & FE_PFEN)) {
+				dst[-1] = cpu_to_scr(opcode & ~SCR_NO_FLUSH);
+				++opchanged;
+			}
+			break;
+
+		case 0x0:
+			/*
+			**	MOVE/CHMOV (absolute address)
+			*/
+			if (!(np->features & FE_WIDE))
+				dst[-1] = cpu_to_scr(opcode | OPC_MOVE);
+			relocs = 1;
+			break;
+
+		case 0x1:
+			/*
+			**	MOVE/CHMOV (table indirect)
+			*/
+			if (!(np->features & FE_WIDE))
+				dst[-1] = cpu_to_scr(opcode | OPC_MOVE);
+			relocs = 0;
+			break;
+
+		case 0x8:
+			/*
+			**	JUMP / CALL
+			**	don't relocate if relative :-)
+			*/
+			if (opcode & 0x00800000)
+				relocs = 0;
+			else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/
+				relocs = 2;
+			else
+				relocs = 1;
+			break;
+
+		case 0x4:
+		case 0x5:
+		case 0x6:
+		case 0x7:
+			relocs = 1;
+			break;
+
+		default:
+			relocs = 0;
+			break;
+		};
+
+		/* Non-relocated operand: just byte-swap and copy it. */
+		if (!relocs) {
+			*dst++ = cpu_to_scr(*src++);
+			continue;
+		}
+		while (relocs--) {
+			old = *src++;
+
+			/* Replace each relocation token with a bus address. */
+			switch (old & RELOC_MASK) {
+			case RELOC_REGISTER:
+				new = (old & ~RELOC_MASK) + pcivtobus(np->base_ba);
+				break;
+			case RELOC_LABEL:
+				new = (old & ~RELOC_MASK) + np->p_script;
+				break;
+			case RELOC_LABELH:
+				new = (old & ~RELOC_MASK) + np->p_scripth;
+				break;
+			case RELOC_SOFTC:
+				new = (old & ~RELOC_MASK) + np->p_ncb;
+				break;
+#ifdef RELOC_KVAR
+			case RELOC_KVAR:
+				new=0;
+				if (((old & ~RELOC_MASK) < SCRIPT_KVAR_FIRST) ||
+				    ((old & ~RELOC_MASK) > SCRIPT_KVAR_LAST))
+					panic("ncr KVAR out of range");
+				new = vtobus(script_kvars[old & ~RELOC_MASK]);
+#endif
+				break;
+			case 0:
+				/* Don't relocate a 0 address. */
+				if (old == 0) {
+					new = old;
+					break;
+				}
+				/* fall through */
+			default:
+				new = 0;	/* For 'cc' not to complain */
+				panic("ncr_script_copy_and_bind: "
+				      "weird relocation %x\n", old);
+				break;
+			}
+
+			*dst++ = cpu_to_scr(new);
+		}
+	};
+}
+
+/*==========================================================
+**
+**
+** Auto configuration: attach and init a host adapter.
+**
+**
+**==========================================================
+*/
+
+/*
+** Linux host data structure.
+*/
+
+struct host_data {
+     struct ncb *ncb;	/* per-controller control block of this host */
+};
+
+/*
+**	Print something that allows retrieving the controller type, unit,
+**	target and lun concerned by a kernel message.
+*/
+
+/* Prefix a kernel message with "<controller>-<target,*>: ". */
+static void PRINT_TARGET(ncb_p np, int target)
+{
+	printk(KERN_INFO "%s-<%d,*>: ", ncr_name(np), target);
+}
+
+/* Prefix a kernel message with "<controller>-<target,lun>: ". */
+static void PRINT_LUN(ncb_p np, int target, int lun)
+{
+	printk(KERN_INFO "%s-<%d,%d>: ", ncr_name(np), target, lun);
+}
+
+/* Prefix a kernel message with the controller/target/lun of a command. */
+static void PRINT_ADDR(Scsi_Cmnd *cmd)
+{
+	struct host_data *host_data = (struct host_data *) cmd->host->hostdata;
+	PRINT_LUN(host_data->ncb, cmd->target, cmd->lun);
+}
+
+/*==========================================================
+**
+** NCR chip clock divisor table.
+** Divisors are multiplied by 10,000,000 in order to make
+** calculations more simple.
+**
+**==========================================================
+*/
+
+#define _5M 5000000
+/* Divisors 1.0, 1.5, 2.0, 3.0, 4.0, 6.0 and 8.0, scaled by 10,000,000. */
+static u_long div_10M[] =
+	{2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
+
+
+/*===============================================================
+**
+** Prepare io register values used by ncr_init() according
+** to selected and supported features.
+**
+** NCR/SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64,
+** 128 transfers. All chips support at least 16 transfers bursts.
+** The 825A, 875 and 895 chips support bursts of up to 128
+** transfers and the 895A and 896 support bursts of up to 64
+** transfers. All other chips support up to 16 transfers bursts.
+**
+** For PCI 32 bit data transfers each transfer is a DWORD (4 bytes).
+** It is a QUADWORD (8 bytes) for PCI 64 bit data transfers.
+** Only the 896 is able to perform 64 bit data transfers.
+**
+** We use log base 2 (burst length) as internal code, with
+** value 0 meaning "burst disabled".
+**
+**===============================================================
+*/
+
+/*
+ * Burst length from burst code.
+ *
+ * NOTE(review): the expansion is an unparenthesized conditional
+ * expression; safe in plain assignments but fragile if ever used
+ * inside a larger expression.
+ */
+#define burst_length(bc) (!(bc))? 0 : 1 << (bc)
+
+/*
+ * Burst code from io register bits.
+ * CTEST4 bit 7 set means bursts disabled -> code 0
+ * (see ncr_init_burst for the inverse mapping).
+ * NOTE(review): same missing outer parentheses as burst_length.
+ */
+#define burst_code(dmode, ctest4, ctest5) \
+	(ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1
+
+/*
+ * Set initial io register bits from burst code.
+ */
+/*
+ * Set initial io register bits from burst code.
+ * bc == 0 disables bursts (CTEST4 bit 7 set); otherwise bc-1 is
+ * split between DMODE bits 6-7 (low 2 bits) and CTEST5 bit 2.
+ */
+static inline void ncr_init_burst(ncb_p np, u_char bc)
+{
+	/* Clear the burst-length fields first. */
+	np->rv_ctest4 &= ~0x80;
+	np->rv_dmode &= ~(0x3 << 6);
+	np->rv_ctest5 &= ~0x4;
+
+	if (!bc) {
+		np->rv_ctest4 |= 0x80;
+	}
+	else {
+		--bc;
+		np->rv_dmode |= ((bc & 0x3) << 6);
+		np->rv_ctest5 |= (bc & 0x4);
+	}
+}
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+
+/*
+** Get target set-up from Symbios format NVRAM.
+*/
+
+static void __init
+ncr_Symbios_setup_target(ncb_p np, int target, Symbios_nvram *nvram)
+{
+	tcb_p tp = &np->target[target];
+	Symbios_target *tn = &nvram->target[target];
+
+	/*
+	** Convert the NVRAM sync period to a driver period factor,
+	** rounding up; 0 maps to 255 (presumably "no sync/async" --
+	** confirm against the Symbios NVRAM layout).
+	*/
+	tp->usrsync = tn->sync_period ? (tn->sync_period + 3) / 4 : 255;
+	tp->usrwide = tn->bus_width == 0x10 ? 1 : 0;	/* 0x10 = 16-bit bus */
+	tp->usrtags =
+		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? MAX_TAGS : 0;
+
+	/* Translate the NVRAM enable flags into driver user flags. */
+	if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
+		tp->usrflag |= UF_NODISC;
+	if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
+		tp->usrflag |= UF_NOSCAN;
+}
+
+/*
+** Get target set-up from Tekram format NVRAM.
+*/
+
+static void __init
+ncr_Tekram_setup_target(ncb_p np, int target, Tekram_nvram *nvram)
+{
+	tcb_p tp = &np->target[target];
+	struct Tekram_target *tn = &nvram->target[target];
+	int i;
+
+	/* Sync period comes from a 16-entry lookup table. */
+	if (tn->flags & TEKRAM_SYNC_NEGO) {
+		i = tn->sync_index & 0xf;
+		tp->usrsync = Tekram_sync[i];
+	}
+
+	tp->usrwide = (tn->flags & TEKRAM_WIDE_NEGO) ? 1 : 0;
+
+	if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
+		tp->usrtags = 2 << nvram->max_tags_index;
+	}
+
+	/*
+	** NOTE(review): plain assignment here, while the Symbios
+	** variant ORs UF_NODISC into usrflag -- confirm whether
+	** overwriting other usrflag bits is intended.
+	*/
+	if (!(tn->flags & TEKRAM_DISCONNECT_ENABLE))
+		tp->usrflag = UF_NODISC;
+
+	/* If any device does not support parity, we will not use this option */
+	if (!(tn->flags & TEKRAM_PARITY_CHECK))
+		np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
+}
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Save initial settings of some IO registers.
+** Assumed to have been set by BIOS.
+*/
+static void __init ncr_save_initial_setting(ncb_p np)
+{
+	/*
+	** NOTE(review): each mask keeps only the register bits the
+	** driver later reuses in ncr_prepare_setting() -- verify
+	** against the chip register layout when changing them.
+	*/
+	np->sv_scntl0 = INB(nc_scntl0) & 0x0a;
+	np->sv_dmode = INB(nc_dmode) & 0xce;
+	np->sv_dcntl = INB(nc_dcntl) & 0xa8;
+	np->sv_ctest3 = INB(nc_ctest3) & 0x01;
+	np->sv_ctest4 = INB(nc_ctest4) & 0x80;
+	np->sv_gpcntl = INB(nc_gpcntl);
+	np->sv_stest2 = INB(nc_stest2) & 0x20;
+	np->sv_stest4 = INB(nc_stest4);
+	np->sv_stest1 = INB(nc_stest1);
+
+	np->sv_scntl3 = INB(nc_scntl3) & 0x07;
+
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+		(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66) ){
+		/*
+		** C1010 always uses large fifo, bit 5 rsvd
+		** scntl4 used ONLY with C1010
+		*/
+		np->sv_ctest5 = INB(nc_ctest5) & 0x04 ;
+		np->sv_scntl4 = INB(nc_scntl4);
+	}
+	else {
+		np->sv_ctest5 = INB(nc_ctest5) & 0x24 ;
+		np->sv_scntl4 = 0;
+	}
+}
+
+/*
+** Prepare io register values used by ncr_init()
+** according to selected and supported features.
+*/
+static int __init ncr_prepare_setting(ncb_p np, ncr_nvram *nvram)
+{
+	u_char	burst_max;
+	u_long	period;
+	int i;
+
+	/*
+	**	Wide ?
+	*/
+
+	np->maxwide = (np->features & FE_WIDE)? 1 : 0;
+
+	/*
+	**	Get the frequency of the chip's clock.
+	**	Find the right value for scntl3.
+	*/
+
+	if (np->features & FE_QUAD)
+		np->multiplier = 4;
+	else if (np->features & FE_DBLR)
+		np->multiplier = 2;
+	else
+		np->multiplier = 1;
+
+	np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000;
+	np->clock_khz *= np->multiplier;
+
+	/* Only measure the clock when it is not the default 40 MHz. */
+	if (np->clock_khz != 40000)
+		ncr_getclock(np, np->multiplier);
+
+	/*
+	 * Divisor to be used for async (timer pre-scaler).
+	 *
+	 * Note: For C1010 the async divisor is 2(8) if the
+	 * quadrupler is disabled (enabled).
+	 */
+
+	if ( (np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+	     (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+
+		np->rv_scntl3 = 0;
+	}
+	else
+	{
+		/* Pick the smallest divisor satisfying the min async period. */
+		i = np->clock_divn - 1;
+		while (--i >= 0) {
+			if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz
+			    > div_10M[i]) {
+				++i;
+				break;
+			}
+		}
+		np->rv_scntl3 = i+1;
+	}
+
+
+	/*
+	 * Save the ultra3 register for the C1010/C1010_66
+	 */
+
+	np->rv_scntl4 = np->sv_scntl4;
+
+	/*
+	 * Minimum synchronous period factor supported by the chip.
+	 * Btw, 'period' is in tenths of nanoseconds.
+	 */
+
+	period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
+	if (period <= 250) np->minsync = 10;
+	else if (period <= 303) np->minsync = 11;
+	else if (period <= 500) np->minsync = 12;
+	else np->minsync = (period + 40 - 1) / 40;
+
+	/*
+	 * Fix up. If sync. factor is 10 (160000Khz clock) and chip
+	 * supports ultra3, then min. sync. period 12.5ns and the factor is 9
+	 */
+
+	if ((np->minsync == 10) && (np->features & FE_ULTRA3))
+		np->minsync = 9;
+
+	/*
+	 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
+	 *
+	 * Transfer period minimums: SCSI-1 200 (50); Fast 100 (25)
+	 *	Ultra 50 (12); Ultra2 (6); Ultra3 (3)
+	 */
+
+	if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
+		np->minsync = 25;
+	else if (np->minsync < 12 && (np->features & FE_ULTRA))
+		np->minsync = 12;
+	else if (np->minsync < 10 && (np->features & FE_ULTRA2))
+		np->minsync = 10;
+	else if (np->minsync < 9 && (np->features & FE_ULTRA3))
+		np->minsync = 9;
+
+	/*
+	 * Maximum synchronous period factor supported by the chip.
+	 */
+
+	period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
+	np->maxsync = period > 2540 ? 254 : period / 10;
+
+	/*
+	**	64 bit (53C895A or 53C896) ?
+	*/
+	if (np->features & FE_64BIT)
+#ifdef SCSI_NCR_USE_64BIT_DAC
+		np->rv_ccntl1 |= (XTIMOD | EXTIBMV);
+#else
+		np->rv_ccntl1 |= (DDAC);
+#endif
+
+	/*
+	**	Phase mismatch handled by SCRIPTS (53C895A, 53C896 or C1010) ?
+	*/
+	if (np->features & FE_NOPM)
+		np->rv_ccntl0 |= (ENPMJ);
+
+	/*
+	**	Prepare initial value of other IO registers
+	*/
+#if defined SCSI_NCR_TRUST_BIOS_SETTING
+	np->rv_scntl0 = np->sv_scntl0;
+	np->rv_dmode = np->sv_dmode;
+	np->rv_dcntl = np->sv_dcntl;
+	np->rv_ctest3 = np->sv_ctest3;
+	np->rv_ctest4 = np->sv_ctest4;
+	np->rv_ctest5 = np->sv_ctest5;
+	burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5);
+#else
+
+	/*
+	**	Select burst length (dwords)
+	**	255 means "use the BIOS setting"; cap at 7 and at the
+	**	chip's own maximum.
+	*/
+	burst_max = driver_setup.burst_max;
+	if (burst_max == 255)
+		burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5);
+	if (burst_max > 7)
+		burst_max = 7;
+	if (burst_max > np->maxburst)
+		burst_max = np->maxburst;
+
+	/*
+	**	DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
+	**	This chip and the 860 Rev 1 may wrongly use PCI cache line
+	**	based transactions on LOAD/STORE instructions. So we have
+	**	to prevent these chips from using such PCI transactions in
+	**	this driver. The generic sym53c8xx driver that does not use
+	**	LOAD/STORE instructions does not need this work-around.
+	*/
+	if ((np->device_id == PCI_DEVICE_ID_NCR_53C810 &&
+	     np->revision_id >= 0x10 && np->revision_id <= 0x11) ||
+	    (np->device_id == PCI_DEVICE_ID_NCR_53C860 &&
+	     np->revision_id <= 0x1))
+		np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);
+
+	/*
+	**	DEL ? - 53C1010 Rev 1 - Part Number 609-0393638
+	**	64-bit Slave Cycles must be disabled.
+	*/
+	if ( ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) && (np->revision_id < 0x02) )
+	    || (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66 ) )
+		np->rv_ccntl1 |= 0x10;
+
+	/*
+	**	Select all supported special features.
+	**	If we are using on-board RAM for scripts, prefetch (PFEN)
+	**	does not help, but burst op fetch (BOF) does.
+	**	Disabling PFEN makes sure BOF will be used.
+	*/
+	if (np->features & FE_ERL)
+		np->rv_dmode |= ERL;		/* Enable Read Line */
+	if (np->features & FE_BOF)
+		np->rv_dmode |= BOF;		/* Burst Opcode Fetch */
+	if (np->features & FE_ERMP)
+		np->rv_dmode |= ERMP;	/* Enable Read Multiple */
+#if 1
+	if ((np->features & FE_PFEN) && !np->base2_ba)
+#else
+	if (np->features & FE_PFEN)
+#endif
+		np->rv_dcntl |= PFEN;	/* Prefetch Enable */
+	if (np->features & FE_CLSE)
+		np->rv_dcntl |= CLSE;	/* Cache Line Size Enable */
+	if (np->features & FE_WRIE)
+		np->rv_ctest3 |= WRIE;	/* Write and Invalidate */
+
+
+	if ( (np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+	     (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) &&
+	     (np->features & FE_DFS))
+		np->rv_ctest5 |= DFS;		/* Dma Fifo Size */
+					/* C1010/C1010_66 always large fifo */
+
+	/*
+	**	Select some other
+	*/
+	if (driver_setup.master_parity)
+		np->rv_ctest4 |= MPEE;	/* Master parity checking */
+	if (driver_setup.scsi_parity)
+		np->rv_scntl0 |= 0x0a;	/* full arb., ena parity, par->ATN */
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	/*
+	**	Get parity checking, host ID and verbose mode from NVRAM
+	**/
+	if (nvram) {
+		switch(nvram->type) {
+		case SCSI_NCR_TEKRAM_NVRAM:
+			np->myaddr = nvram->data.Tekram.host_id & 0x0f;
+			break;
+		case SCSI_NCR_SYMBIOS_NVRAM:
+			if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
+				np->rv_scntl0 &= ~0x0a;
+			np->myaddr = nvram->data.Symbios.host_id & 0x0f;
+			if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
+				np->verbose += 1;
+			break;
+		}
+	}
+#endif
+	/*
+	**	Get SCSI addr of host adapter (set by bios?).
+	*/
+	if (np->myaddr == 255) {
+		np->myaddr = INB(nc_scid) & 0x07;
+		if (!np->myaddr)
+			np->myaddr = SCSI_NCR_MYADDR;
+	}
+
+#endif /* SCSI_NCR_TRUST_BIOS_SETTING */
+
+	/*
+	 *	Prepare initial io register bits for burst length
+	 */
+	ncr_init_burst(np, burst_max);
+
+	/*
+	**	Set SCSI BUS mode.
+	**
+	**	- ULTRA2 chips (895/895A/896)
+	**	  and ULTRA 3 chips (1010) report the current
+	**	  BUS mode through the STEST4 IO register.
+	**	- For previous generation chips (825/825A/875),
+	**	  user has to tell us how to check against HVD,
+	**	  since a 100% safe algorithm is not possible.
+	*/
+	np->scsi_mode = SMODE_SE;
+	if (np->features & (FE_ULTRA2 | FE_ULTRA3))
+		np->scsi_mode = (np->sv_stest4 & SMODE);
+	else if	(np->features & FE_DIFF) {
+		/* Cases deliberately cascade: 4 -> 3 -> 2 -> 1. */
+		switch(driver_setup.diff_support) {
+		case 4:	/* Trust previous settings if present, then GPIO3 */
+			if (np->sv_scntl3) {
+				if (np->sv_stest2 & 0x20)
+					np->scsi_mode = SMODE_HVD;
+				break;
+			}
+			/* fall through */
+		case 3:	/* SYMBIOS controllers report HVD through GPIO3 */
+			if (nvram && nvram->type != SCSI_NCR_SYMBIOS_NVRAM)
+				break;
+			if (INB(nc_gpreg) & 0x08)
+				break;
+			/* fall through */
+		case 2:	/* Set HVD unconditionally */
+			np->scsi_mode = SMODE_HVD;
+			/* fall through */
+		case 1:	/* Trust previous settings for HVD */
+			if (np->sv_stest2 & 0x20)
+				np->scsi_mode = SMODE_HVD;
+			break;
+		default:/* Don't care about HVD */
+			break;
+		}
+	}
+	if (np->scsi_mode == SMODE_HVD)
+		np->rv_stest2 |= 0x20;
+
+	/*
+	**	Set LED support from SCRIPTS.
+	**	Ignore this feature for boards known to use a
+	**	specific GPIO wiring and for the 895A or 896
+	**	that drive the LED directly.
+	**	Also probe initial setting of GPIO0 as output.
+	*/
+	if ((driver_setup.led_pin ||
+	     (nvram && nvram->type == SCSI_NCR_SYMBIOS_NVRAM)) &&
+	    !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
+		np->features |= FE_LED0;
+
+	/*
+	**	Set irq mode.
+	*/
+	switch(driver_setup.irqm & 3) {
+	case 2:
+		np->rv_dcntl |= IRQM;
+		break;
+	case 1:
+		np->rv_dcntl |= (np->sv_dcntl & IRQM);
+		break;
+	default:
+		break;
+	}
+
+	/*
+	**	Configure targets according to driver setup.
+	**	If NVRAM present get targets setup from NVRAM.
+	**	Allow to override sync, wide and NOSCAN from
+	**	boot command line.
+	*/
+	for (i = 0 ; i < MAX_TARGET ; i++) {
+		tcb_p tp = &np->target[i];
+
+		tp->usrsync = 255;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+		if (nvram) {
+			switch(nvram->type) {
+			case SCSI_NCR_TEKRAM_NVRAM:
+				ncr_Tekram_setup_target(np, i, &nvram->data.Tekram);
+				break;
+			case SCSI_NCR_SYMBIOS_NVRAM:
+				ncr_Symbios_setup_target(np, i, &nvram->data.Symbios);
+				break;
+			}
+			if (driver_setup.use_nvram & 0x2)
+				tp->usrsync = driver_setup.default_sync;
+			if (driver_setup.use_nvram & 0x4)
+				tp->usrwide = driver_setup.max_wide;
+			if (driver_setup.use_nvram & 0x8)
+				tp->usrflag &= ~UF_NOSCAN;
+		}
+		else {
+#else
+		if (1) {
+#endif
+			tp->usrsync = driver_setup.default_sync;
+			tp->usrwide = driver_setup.max_wide;
+			tp->usrtags = MAX_TAGS;
+			if (!driver_setup.disconnection)
+				np->target[i].usrflag = UF_NODISC;
+		}
+	}
+
+	/*
+	**	Announce all that stuff to user.
+	*/
+
+	i = nvram ? nvram->type : 0;
+	printk(KERN_INFO "%s: %sID %d, Fast-%d%s%s\n", ncr_name(np),
+		i == SCSI_NCR_SYMBIOS_NVRAM ? "Symbios format NVRAM, " :
+		(i == SCSI_NCR_TEKRAM_NVRAM ? "Tekram format NVRAM, " : ""),
+		np->myaddr,
+		np->minsync < 10 ? 80 :
+		(np->minsync < 12 ? 40 : (np->minsync < 25 ? 20 : 10) ),
+		(np->rv_scntl0 & 0xa) ? ", Parity Checking" : ", NO Parity",
+		(np->rv_stest2 & 0x20) ? ", Differential" : "");
+
+	if (bootverbose > 1) {
+		printk (KERN_INFO "%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+			ncr_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
+			np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);
+
+		printk (KERN_INFO "%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+			ncr_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
+			np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
+	}
+
+	if (bootverbose && np->base2_ba)
+		printk (KERN_INFO "%s: on-chip RAM at 0x%lx\n",
+			ncr_name(np), np->base2_ba);
+
+	return 0;
+}
+
+
+#ifdef SCSI_NCR_DEBUG_NVRAM
+
+void __init ncr_display_Symbios_nvram(ncb_p np, Symbios_nvram *nvram)
+{
+ int i;
+
+ /* display Symbios nvram host data */
+ printk(KERN_DEBUG "%s: HOST ID=%d%s%s%s%s%s\n",
+ ncr_name(np), nvram->host_id & 0x0f,
+ (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"",
+ (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"",
+ (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"",
+ (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :"");
+
+ /* display Symbios nvram drive data */
+ for (i = 0 ; i < 15 ; i++) {
+ struct Symbios_target *tn = &nvram->target[i];
+ printk(KERN_DEBUG "%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
+ ncr_name(np), i,
+ (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
+ (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
+ (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
+ tn->bus_width,
+ tn->sync_period / 4,
+ tn->timeout);
+ }
+}
+
+static u_char Tekram_boot_delay[7] __initdata = {3, 5, 10, 20, 30, 60, 120};
+
+void __init ncr_display_Tekram_nvram(ncb_p np, Tekram_nvram *nvram)
+{
+ int i, tags, boot_delay;
+ char *rem;
+
+ /* display Tekram nvram host data */
+ tags = 2 << nvram->max_tags_index;
+ boot_delay = 0;
+ if (nvram->boot_delay_index < 6)
+ boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
+ switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
+ default:
+ case 0: rem = ""; break;
+ case 1: rem = " REMOVABLE=boot device"; break;
+ case 2: rem = " REMOVABLE=all"; break;
+ }
+
+ printk(KERN_DEBUG
+ "%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
+ ncr_name(np), nvram->host_id & 0x0f,
+ (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" :"",
+ (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"",
+ (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"",
+ (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"",
+ (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"",
+ (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"",
+ (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"",
+ rem, boot_delay, tags);
+
+ /* display Tekram nvram drive data */
+ for (i = 0; i <= 15; i++) {
+ int sync, j;
+ struct Tekram_target *tn = &nvram->target[i];
+ j = tn->sync_index & 0xf;
+ sync = Tekram_sync[j];
+ printk(KERN_DEBUG "%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
+ ncr_name(np), i,
+ (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
+ (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
+ (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & TEKRAM_START_CMD) ? " START" : "",
+ (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
+ (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
+ sync);
+ }
+}
+#endif /* SCSI_NCR_DEBUG_NVRAM */
+
+/*
+** Host attach and initialisations.
+**
+** Allocate host data and ncb structure.
+** Request IO region and remap MMIO region.
+** Do chip initialization.
+** If all is OK, install interrupt handling and
+** start the timer daemon.
+*/
+
+static int __init
+ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device)
+{
+ struct host_data *host_data;
+ ncb_p np = 0;
+ struct Scsi_Host *instance = 0;
+ u_long flags = 0;
+ ncr_nvram *nvram = device->nvram;
+ int i;
+
+ printk(KERN_INFO NAME53C "%s-%d: rev 0x%x on pci bus %d device %d function %d "
+#ifdef __sparc__
+ "irq %s\n",
+#else
+ "irq %d\n",
+#endif
+ device->chip.name, unit, device->chip.revision_id,
+ device->slot.bus, (device->slot.device_fn & 0xf8) >> 3,
+ device->slot.device_fn & 7,
+#ifdef __sparc__
+ __irq_itoa(device->slot.irq));
+#else
+ device->slot.irq);
+#endif
+
+ /*
+ ** Allocate host_data structure
+ */
+ if (!(instance = scsi_register(tpnt, sizeof(*host_data))))
+ goto attach_error;
+ host_data = (struct host_data *) instance->hostdata;
+
+ /*
+ ** Allocate the host control block.
+ */
+ np = __m_calloc_dma(device->pdev, sizeof(struct ncb), "NCB");
+ if (!np)
+ goto attach_error;
+ NCR_INIT_LOCK_NCB(np);
+ np->pdev = device->pdev;
+ np->p_ncb = vtobus(np);
+ host_data->ncb = np;
+
+ /*
+ ** Store input information in the host data structure.
+ */
+ strncpy(np->chip_name, device->chip.name, sizeof(np->chip_name) - 1);
+ np->unit = unit;
+ np->verbose = driver_setup.verbose;
+ sprintf(np->inst_name, NAME53C "%s-%d", np->chip_name, np->unit);
+ np->device_id = device->chip.device_id;
+ np->revision_id = device->chip.revision_id;
+ np->bus = device->slot.bus;
+ np->device_fn = device->slot.device_fn;
+ np->features = device->chip.features;
+ np->clock_divn = device->chip.nr_divisor;
+ np->maxoffs = device->chip.offset_max;
+ np->maxburst = device->chip.burst_max;
+ np->myaddr = device->host_id;
+
+ /*
+ ** Allocate the start queue.
+ */
+ np->squeue = (ncrcmd *)
+ m_calloc_dma(sizeof(ncrcmd)*(MAX_START*2), "SQUEUE");
+ if (!np->squeue)
+ goto attach_error;
+ np->p_squeue = vtobus(np->squeue);
+
+ /*
+ ** Allocate the done queue.
+ */
+ np->dqueue = (ncrcmd *)
+ m_calloc_dma(sizeof(ncrcmd)*(MAX_START*2), "DQUEUE");
+ if (!np->dqueue)
+ goto attach_error;
+
+ /*
+ ** Allocate the target bus address array.
+ */
+ np->targtbl = (u_int32 *) m_calloc_dma(256, "TARGTBL");
+ if (!np->targtbl)
+ goto attach_error;
+
+ /*
+ ** Allocate SCRIPTS areas
+ */
+ np->script0 = (struct script *)
+ m_calloc_dma(sizeof(struct script), "SCRIPT");
+ if (!np->script0)
+ goto attach_error;
+ np->scripth0 = (struct scripth *)
+ m_calloc_dma(sizeof(struct scripth), "SCRIPTH");
+ if (!np->scripth0)
+ goto attach_error;
+
+ /*
+ ** Initialize the CCB free queue and
+ ** allocate some CCBs. We need at least ONE.
+ */
+ xpt_que_init(&np->free_ccbq);
+ xpt_que_init(&np->b0_ccbq);
+ if (!ncr_alloc_ccb(np))
+ goto attach_error;
+
+ /*
+ ** Initialize timer structure
+ **
+ */
+ init_timer(&np->timer);
+ np->timer.data = (unsigned long) np;
+ np->timer.function = sym53c8xx_timeout;
+
+ /*
+ ** Try to map the controller chip to
+ ** virtual and physical memory.
+ */
+
+ np->base_ba = device->slot.base;
+ np->base_ws = (np->features & FE_IO256)? 256 : 128;
+ np->base2_ba = (np->features & FE_RAM)? device->slot.base_2 : 0;
+
+#ifndef SCSI_NCR_IOMAPPED
+ np->base_va = remap_pci_mem(np->base_ba, np->base_ws);
+ if (!np->base_va) {
+ printk(KERN_ERR "%s: can't map PCI MMIO region\n",ncr_name(np));
+ goto attach_error;
+ }
+ else if (bootverbose > 1)
+ printk(KERN_INFO "%s: using memory mapped IO\n", ncr_name(np));
+
+ /*
+ ** Make the controller's registers available.
+ ** Now the INB INW INL OUTB OUTW OUTL macros
+ ** can be used safely.
+ */
+
+ np->reg = (struct ncr_reg *) np->base_va;
+
+#endif /* !defined SCSI_NCR_IOMAPPED */
+
+ /*
+ ** If on-chip RAM is used, make sure SCRIPTS isn't too large.
+ */
+ if (np->base2_ba && sizeof(struct script) > 4096) {
+ printk(KERN_ERR "%s: script too large.\n", ncr_name(np));
+ goto attach_error;
+ }
+
+ /*
+ ** Try to map the controller chip into iospace.
+ */
+
+ if (device->slot.io_port) {
+ request_region(device->slot.io_port, np->base_ws, NAME53C8XX);
+ np->base_io = device->slot.io_port;
+ }
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (nvram) {
+ switch(nvram->type) {
+ case SCSI_NCR_SYMBIOS_NVRAM:
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ ncr_display_Symbios_nvram(np, &nvram->data.Symbios);
+#endif
+ break;
+ case SCSI_NCR_TEKRAM_NVRAM:
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ ncr_display_Tekram_nvram(np, &nvram->data.Tekram);
+#endif
+ break;
+ default:
+ nvram = 0;
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ printk(KERN_DEBUG "%s: NVRAM: None or invalid data.\n", ncr_name(np));
+#endif
+ }
+ }
+#endif
+
+ /*
+ ** Save setting of some IO registers, so we will
+ ** be able to probe specific implementations.
+ */
+ ncr_save_initial_setting (np);
+
+ /*
+ ** Reset the chip now, since it has been reported
+ ** that SCSI clock calibration may not work properly
+ ** if the chip is currently active.
+ */
+ ncr_chip_reset (np);
+
+ /*
+ ** Do chip dependent initialization.
+ */
+ (void) ncr_prepare_setting(np, nvram);
+
+ /*
+ ** Check the PCI clock frequency if needed.
+ **
+ ** Must be done after ncr_prepare_setting since it destroys
+ ** STEST1 that is used to probe for the clock multiplier.
+ **
+ ** The range is currently [22688 - 45375 Khz], given
+ ** the values used by ncr_getclock().
+ ** This calibration of the frequency measurement
+ ** algorithm against the PCI clock frequency is only
+ ** performed if the driver has had to measure the SCSI
+ ** clock due to other heuristics not having been enough
+ ** to deduce the SCSI clock frequency.
+ **
+ ** When the chip has been initialized correctly by the
+ ** SCSI BIOS, the driver deduces the presence of the
+ ** clock multiplier and the value of the SCSI clock from
+ ** initial values of IO registers, and therefore no
+ ** clock measurement is performed.
+ ** Normally the driver should never have to measure any
+ ** clock, unless the controller may use a 80 MHz clock
+ ** or has a clock multiplier and any of the following
+ ** condition is met:
+ **
+ ** - No SCSI BIOS is present.
+ ** - SCSI BIOS didn't enable the multiplier for some reason.
+ ** - User has disabled the controller from the SCSI BIOS.
+ ** - User booted the O/S from another O/S that didn't enable
+ ** the multiplier for some reason.
+ **
+ ** As a result, the driver may only have to measure some
+ ** frequency in very unusual situations.
+ **
+ ** For this reality test against the PCI clock to really
+ ** protect against flaws in the udelay() calibration or
+ ** driver problem that affect the clock measurement
+ ** algorithm, the actual PCI clock frequency must be 33 MHz.
+ */
+ i = np->pciclock_max ? ncr_getpciclock(np) : 0;
+ if (i && (i < np->pciclock_min || i > np->pciclock_max)) {
+ printk(KERN_ERR "%s: PCI clock (%u KHz) is out of range "
+ "[%u KHz - %u KHz].\n",
+ ncr_name(np), i, np->pciclock_min, np->pciclock_max);
+ goto attach_error;
+ }
+
+ /*
+ ** Patch script to physical addresses
+ */
+ ncr_script_fill (&script0, &scripth0);
+
+ np->p_script = vtobus(np->script0);
+ np->p_scripth = vtobus(np->scripth0);
+ np->p_scripth0 = np->p_scripth;
+
+ if (np->base2_ba) {
+ np->p_script = pcivtobus(np->base2_ba);
+ if (np->features & FE_RAM8K) {
+ np->base2_ws = 8192;
+ np->p_scripth = np->p_script + 4096;
+#if BITS_PER_LONG > 32
+ np->scr_ram_seg = cpu_to_scr(np->base2_ba >> 32);
+#endif
+ }
+ else
+ np->base2_ws = 4096;
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ np->base2_va = remap_pci_mem(np->base2_ba, np->base2_ws);
+ if (!np->base2_va) {
+ printk(KERN_ERR "%s: can't map PCI MEMORY region\n",
+ ncr_name(np));
+ goto attach_error;
+ }
+#endif
+ }
+
+ ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script0, sizeof(struct script));
+ ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth0, sizeof(struct scripth));
+
+ /*
+ ** Patch some variables in SCRIPTS
+ */
+ np->scripth0->pm0_data_addr[0] =
+ cpu_to_scr(NCB_SCRIPT_PHYS(np, pm0_data));
+ np->scripth0->pm1_data_addr[0] =
+ cpu_to_scr(NCB_SCRIPT_PHYS(np, pm1_data));
+
+ /*
+ ** Patch if not Ultra 3 - Do not write to scntl4
+ */
+ if (np->features & FE_ULTRA3) {
+ np->script0->resel_scntl4[0] = cpu_to_scr(SCR_LOAD_REL (scntl4, 1));
+ np->script0->resel_scntl4[1] = cpu_to_scr(offsetof(struct tcb, uval));
+ }
+
+
+#ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ np->scripth0->script0_ba[0] = cpu_to_scr(vtobus(np->script0));
+ np->scripth0->script0_ba64[0] = cpu_to_scr(vtobus(np->script0));
+ np->scripth0->scripth0_ba64[0] = cpu_to_scr(vtobus(np->scripth0));
+ np->scripth0->ram_seg64[0] = np->scr_ram_seg;
+#endif
+ /*
+ ** Prepare the idle and invalid task actions.
+ */
+ np->idletask.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ np->idletask.restart = cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l));
+ np->p_idletask = NCB_PHYS(np, idletask);
+
+ np->notask.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ np->notask.restart = cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l));
+ np->p_notask = NCB_PHYS(np, notask);
+
+ np->bad_i_t_l.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ np->bad_i_t_l.restart = cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l));
+ np->p_bad_i_t_l = NCB_PHYS(np, bad_i_t_l);
+
+ np->bad_i_t_l_q.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ np->bad_i_t_l_q.restart = cpu_to_scr(NCB_SCRIPTH_PHYS (np,bad_i_t_l_q));
+ np->p_bad_i_t_l_q = NCB_PHYS(np, bad_i_t_l_q);
+
+ /*
+ ** Allocate and prepare the bad lun table.
+ */
+ np->badluntbl = m_calloc_dma(256, "BADLUNTBL");
+ if (!np->badluntbl)
+ goto attach_error;
+
+ assert (offsetof(struct lcb, resel_task) == 0);
+ np->resel_badlun = cpu_to_scr(NCB_SCRIPTH_PHYS(np, resel_bad_lun));
+
+ for (i = 0 ; i < 64 ; i++)
+ np->badluntbl[i] = cpu_to_scr(NCB_PHYS(np, resel_badlun));
+
+ /*
+ ** Prepare the target bus address array.
+ */
+ np->scripth0->targtbl[0] = cpu_to_scr(vtobus(np->targtbl));
+ for (i = 0 ; i < MAX_TARGET ; i++) {
+ np->targtbl[i] = cpu_to_scr(NCB_PHYS(np, target[i]));
+ np->target[i].b_luntbl = cpu_to_scr(vtobus(np->badluntbl));
+ np->target[i].b_lun0 = cpu_to_scr(NCB_PHYS(np, resel_badlun));
+ }
+
+ /*
+ ** Patch the script for LED support.
+ */
+
+ if (np->features & FE_LED0) {
+ np->script0->idle[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_OR, 0x01));
+ np->script0->reselected[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
+ np->script0->start[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
+ }
+
+ /*
+ ** Patch the script to provide an extra clock cycle on
+ ** data out phase - 53C1010_66MHz part only.
+ */
+ if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66){
+ np->script0->datao_phase[0] =
+ cpu_to_scr(SCR_REG_REG(scntl4, SCR_OR, 0x0c));
+ }
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** If user does not want to use IMMEDIATE ARBITRATION
+ ** when we are reselected while attempting to arbitrate,
+ ** patch the SCRIPTS accordingly with a SCRIPT NO_OP.
+ */
+ if (!(driver_setup.iarb & 1))
+ np->script0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
+ /*
+ ** If user wants IARB to be set when we win arbitration
+ ** and have other jobs, compute the max number of consecutive
+ ** settings of IARB hint before we leave devices a chance to
+ ** arbitrate for reselection.
+ */
+ np->iarb_max = (driver_setup.iarb >> 4);
+#endif
+
+ /*
+ ** DEL 472 - 53C896 Rev 1 - Part Number 609-0393055 - ITEM 5.
+ */
+ if (np->device_id == PCI_DEVICE_ID_NCR_53C896 &&
+ np->revision_id <= 0x1 && (np->features & FE_NOPM)) {
+ np->scatter = ncr_scatter_896R1;
+ np->script0->datai_phase[0] = cpu_to_scr(SCR_JUMP);
+ np->script0->datai_phase[1] =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, tweak_pmj));
+ np->script0->datao_phase[0] = cpu_to_scr(SCR_JUMP);
+ np->script0->datao_phase[1] =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, tweak_pmj));
+ }
+ else
+#ifdef DEBUG_896R1
+ np->scatter = ncr_scatter_896R1;
+#else
+ np->scatter = ncr_scatter;
+#endif
+
+ /*
+ ** Reset chip.
+ ** We should use ncr_soft_reset(), but we do not want to do
+ ** so, since we may not be safe if an ABRT interrupt occurs due
+ ** to the BIOS or previous O/S having enabled this interrupt.
+ **
+ ** For C1010 need to set ABRT bit prior to SRST if SCRIPTs
+ ** are running. Not true in this case.
+ */
+ ncr_chip_reset(np);
+
+ /*
+ ** Now check the cache handling of the pci chipset.
+ */
+
+ if (ncr_snooptest (np)) {
+ printk (KERN_ERR "CACHE INCORRECTLY CONFIGURED.\n");
+ goto attach_error;
+ };
+
+ /*
+ ** Install the interrupt handler.
+ ** If we synchronize the C code with SCRIPTS on interrupt,
+ ** we do not want to share the INTR line at all.
+ */
+ if (request_irq(device->slot.irq, sym53c8xx_intr,
+#ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR
+ ((driver_setup.irqm & 0x20) ? 0 : SA_INTERRUPT),
+#else
+ ((driver_setup.irqm & 0x10) ? 0 : SA_SHIRQ) |
+
+#if 0 && LINUX_VERSION_CODE < LinuxVersionCode(2,2,0)
+ ((driver_setup.irqm & 0x20) ? 0 : SA_INTERRUPT),
+#else
+ 0,
+#endif
+#endif
+ NAME53C8XX, np)) {
+ printk(KERN_ERR "%s: request irq %d failure\n",
+ ncr_name(np), device->slot.irq);
+ goto attach_error;
+ }
+ np->irq = device->slot.irq;
+
+ /*
+ ** After SCSI devices have been opened, we cannot
+ ** reset the bus safely, so we do it here.
+ ** Interrupt handler does the real work.
+ ** Process the reset exception,
+ ** if interrupts are not enabled yet.
+ ** Then enable disconnects.
+ */
+ NCR_LOCK_NCB(np, flags);
+ if (ncr_reset_scsi_bus(np, 0, driver_setup.settle_delay) != 0) {
+ printk(KERN_ERR "%s: FATAL ERROR: CHECK SCSI BUS - CABLES, TERMINATION, DEVICE POWER etc.!\n", ncr_name(np));
+
+ NCR_UNLOCK_NCB(np, flags);
+ goto attach_error;
+ }
+ ncr_exception (np);
+
+ /*
+ ** The middle-level SCSI driver does not
+ ** wait for devices to settle.
+ ** Wait synchronously if more than 2 seconds.
+ */
+ if (driver_setup.settle_delay > 2) {
+ printk(KERN_INFO "%s: waiting %d seconds for scsi devices to settle...\n",
+ ncr_name(np), driver_setup.settle_delay);
+ MDELAY (1000 * driver_setup.settle_delay);
+ }
+
+ /*
+ ** start the timeout daemon
+ */
+ np->lasttime=0;
+ ncr_timeout (np);
+
+ /*
+ ** use SIMPLE TAG messages by default
+ */
+#ifdef SCSI_NCR_ALWAYS_SIMPLE_TAG
+ np->order = M_SIMPLE_TAG;
+#endif
+
+ /*
+ ** Done.
+ */
+ if (!first_host)
+ first_host = instance;
+
+ /*
+ ** Fill Linux host instance structure
+ ** and return success.
+ */
+ instance->max_channel = 0;
+ instance->this_id = np->myaddr;
+ instance->max_id = np->maxwide ? 16 : 8;
+ instance->max_lun = MAX_LUN;
+#ifndef SCSI_NCR_IOMAPPED
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,29)
+ instance->base = (unsigned long) np->reg;
+#else
+ instance->base = (char *) np->reg;
+#endif
+#endif
+ instance->irq = np->irq;
+ instance->unique_id = np->base_io;
+ instance->io_port = np->base_io;
+ instance->n_io_port = np->base_ws;
+ instance->dma_channel = 0;
+ instance->cmd_per_lun = MAX_TAGS;
+ instance->can_queue = (MAX_START-4);
+
+ np->check_integrity = 0;
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+ instance->check_integrity = 0;
+
+#ifdef SCSI_NCR_ENABLE_INTEGRITY_CHECK
+ if ( !(driver_setup.bus_check & 0x04) ) {
+ np->check_integrity = 1;
+ instance->check_integrity = 1;
+ }
+#endif
+#endif
+
+ instance->select_queue_depths = sym53c8xx_select_queue_depths;
+
+ NCR_UNLOCK_NCB(np, flags);
+
+ /*
+ ** Now let the generic SCSI driver
+ ** look for the SCSI devices on the bus ..
+ */
+ return 0;
+
+attach_error:
+ if (!instance) return -1;
+ printk(KERN_INFO "%s: giving up ...\n", ncr_name(np));
+ if (np)
+ ncr_free_resources(np);
+ scsi_unregister(instance);
+
+ return -1;
+ }
+
+
+/*
+** Free controller resources.
+*/
+static void ncr_free_resources(ncb_p np)
+{
+ ccb_p cp;
+ tcb_p tp;
+ lcb_p lp;
+ int target, lun;
+
+ if (np->irq)
+ free_irq(np->irq, np);
+ if (np->base_io)
+ release_region(np->base_io, np->base_ws);
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ if (np->base_va)
+ unmap_pci_mem(np->base_va, np->base_ws);
+ if (np->base2_va)
+ unmap_pci_mem(np->base2_va, np->base2_ws);
+#endif
+ if (np->scripth0)
+ m_free_dma(np->scripth0, sizeof(struct scripth), "SCRIPTH");
+ if (np->script0)
+ m_free_dma(np->script0, sizeof(struct script), "SCRIPT");
+ if (np->squeue)
+ m_free_dma(np->squeue, sizeof(ncrcmd)*(MAX_START*2), "SQUEUE");
+ if (np->dqueue)
+ m_free_dma(np->dqueue, sizeof(ncrcmd)*(MAX_START*2),"DQUEUE");
+
+ while ((cp = np->ccbc) != NULL) {
+ np->ccbc = cp->link_ccb;
+ m_free_dma(cp, sizeof(*cp), "CCB");
+ }
+
+ if (np->badluntbl)
+ m_free_dma(np->badluntbl, 256,"BADLUNTBL");
+
+ for (target = 0; target < MAX_TARGET ; target++) {
+ tp = &np->target[target];
+ for (lun = 0 ; lun < MAX_LUN ; lun++) {
+ lp = ncr_lp(np, tp, lun);
+ if (!lp)
+ continue;
+ if (lp->tasktbl != &lp->tasktbl_0)
+ m_free_dma(lp->tasktbl, MAX_TASKS*4, "TASKTBL");
+ if (lp->cb_tags)
+ m_free(lp->cb_tags, MAX_TAGS, "CB_TAGS");
+ m_free_dma(lp, sizeof(*lp), "LCB");
+ }
+#if MAX_LUN > 1
+ if (tp->lmp)
+ m_free(tp->lmp, MAX_LUN * sizeof(lcb_p), "LMP");
+ if (tp->luntbl)
+ m_free_dma(tp->luntbl, 256, "LUNTBL");
+#endif
+ }
+
+ if (np->targtbl)
+ m_free_dma(np->targtbl, 256, "TARGTBL");
+
+ m_free_dma(np, sizeof(*np), "NCB");
+}
+
+
+/*==========================================================
+**
+**
+** Done SCSI commands list management.
+**
+ ** We do not enter the scsi_done() callback immediately
+** after a command has been seen as completed but we
+** insert it into a list which is flushed outside any kind
+** of driver critical section.
+** This allows to do minimal stuff under interrupt and
+** inside critical sections and to also avoid locking up
+** on recursive calls to driver entry points under SMP.
+** In fact, the only kernel point which is entered by the
+** driver with a driver lock set is get_free_pages(GFP_ATOMIC...)
+** that shall not reenter the driver under any circumstance.
+**
+**==========================================================
+*/
+static inline void ncr_queue_done_cmd(ncb_p np, Scsi_Cmnd *cmd)
+{
+ unmap_scsi_data(np, cmd);
+ cmd->host_scribble = (char *) np->done_list;
+ np->done_list = cmd;
+}
+
+static inline void ncr_flush_done_cmds(Scsi_Cmnd *lcmd)
+{
+ Scsi_Cmnd *cmd;
+
+ while (lcmd) {
+ cmd = lcmd;
+ lcmd = (Scsi_Cmnd *) cmd->host_scribble;
+ cmd->scsi_done(cmd);
+ }
+}
+
+/*==========================================================
+**
+**
+** Prepare the next negotiation message for integrity check,
+** if needed.
+**
+** Fill in the part of message buffer that contains the
+** negotiation and the nego_status field of the CCB.
+** Returns the size of the message in bytes.
+**
+** If tp->ppr_negotiation is 1 and a M_REJECT occurs, then
+** we disable ppr_negotiation. If the first ppr_negotiation is
+** successful, set this flag to 2.
+**
+**==========================================================
+*/
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+static int ncr_ic_nego(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd, u_char *msgptr)
+{
+ tcb_p tp = &np->target[cp->target];
+ int msglen = 0;
+ int nego = 0;
+ u_char new_width, new_offset, new_period;
+ u_char no_increase;
+
+ if (tp->ppr_negotiation == 1) /* PPR message successful */
+ tp->ppr_negotiation = 2;
+
+ if (tp->inq_done) {
+
+ if (!tp->ic_maximums_set) {
+ tp->ic_maximums_set = 1;
+
+ /*
+ * Check against target, host and user limits
+ */
+ if ( (tp->inq_byte7 & INQ7_WIDE16) &&
+ np->maxwide && tp->usrwide)
+ tp->ic_max_width = 1;
+ else
+ tp->ic_max_width = 0;
+
+
+ if ((tp->inq_byte7 & INQ7_SYNC) && tp->maxoffs)
+ tp->ic_min_sync = (tp->minsync < np->minsync) ?
+ np->minsync : tp->minsync;
+ else
+ tp->ic_min_sync = 255;
+
+ tp->period = 1;
+ tp->widedone = 1;
+
+ /*
+ * Enable PPR negotiation - only if Ultra3 support
+ * is accessible.
+ */
+
+#if 0
+ if (tp->ic_max_width && (tp->ic_min_sync != 255 ))
+ tp->ppr_negotiation = 1;
+#endif
+ tp->ppr_negotiation = 0;
+ if (np->features & FE_ULTRA3) {
+ if (tp->ic_max_width && (tp->ic_min_sync == 0x09))
+ tp->ppr_negotiation = 1;
+ }
+
+ if (!tp->ppr_negotiation)
+ cmd->ic_nego &= ~NS_PPR;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_IC) {
+ printk("%s: cmd->ic_nego %d, 1st byte 0x%2X\n",
+ ncr_name(np), cmd->ic_nego, cmd->cmnd[0]);
+ }
+
+ /* Previous command recorded a parity or an initiator
+ * detected error condition. Force bus to narrow for this
+ * target. Clear flag. Negotiation on request sense.
+ * Note: kernel forces 2 bus resets :o( but clears itself out.
+ * Minor bug? in scsi_obsolete.c (ugly)
+ */
+ if (np->check_integ_par) {
+ printk("%s: Parity Error. Target set to narrow.\n",
+ ncr_name(np));
+ tp->ic_max_width = 0;
+ tp->widedone = tp->period = 0;
+ }
+
+ /* Initializing:
+ * If ic_nego == NS_PPR, we are in the initial test for
+ * PPR messaging support. If driver flag is clear, then
+ * either we don't support PPR nego (narrow or async device)
+ * or this is the second TUR and we have had a M. REJECT
+ * or unexpected disconnect on the first PPR negotiation.
+ * Do not negotiate, reset nego flags (in case a reset has
+ * occurred), clear ic_nego and return.
+ * General case: Kernel will clear flag on a fallback.
+ * Do only SDTR or WDTR in the future.
+ */
+ if (!tp->ppr_negotiation && (cmd->ic_nego == NS_PPR )) {
+ tp->ppr_negotiation = 0;
+ cmd->ic_nego &= ~NS_PPR;
+ tp->widedone = tp->period = 1;
+ return msglen;
+ }
+ else if (( tp->ppr_negotiation && !(cmd->ic_nego & NS_PPR )) ||
+ (!tp->ppr_negotiation && (cmd->ic_nego & NS_PPR )) ) {
+ tp->ppr_negotiation = 0;
+ cmd->ic_nego &= ~NS_PPR;
+ }
+
+ /*
+ * Always check the PPR nego. flag bit if ppr_negotiation
+ * is set. If the ic_nego PPR bit is clear,
+ * there must have been a fallback. Do only
+ * WDTR / SDTR in the future.
+ */
+ if ((tp->ppr_negotiation) && (!(cmd->ic_nego & NS_PPR)))
+ tp->ppr_negotiation = 0;
+
+ /* In case of a bus reset, ncr_negotiate will reset
+ * the flags tp->widedone and tp->period to 0, forcing
+ * a new negotiation. Do WDTR then SDTR. If PPR, do both.
+ * Do NOT increase the period. It is possible for the Scsi_Cmnd
+ * flags to be set to increase the period when a bus reset
+ * occurs - we don't want to change anything.
+ */
+
+ no_increase = 0;
+
+ if (tp->ppr_negotiation && (!tp->widedone) && (!tp->period) ) {
+ cmd->ic_nego = NS_PPR;
+ tp->widedone = tp->period = 1;
+ no_increase = 1;
+ }
+ else if (!tp->widedone) {
+ cmd->ic_nego = NS_WIDE;
+ tp->widedone = 1;
+ no_increase = 1;
+ }
+ else if (!tp->period) {
+ cmd->ic_nego = NS_SYNC;
+ tp->period = 1;
+ no_increase = 1;
+ }
+
+ new_width = cmd->ic_nego_width & tp->ic_max_width;
+
+ switch (cmd->ic_nego_sync) {
+ case 2: /* increase the period */
+ if (!no_increase) {
+ if (tp->ic_min_sync <= 0x09)
+ tp->ic_min_sync = 0x0A;
+ else if (tp->ic_min_sync <= 0x0A)
+ tp->ic_min_sync = 0x0C;
+ else if (tp->ic_min_sync <= 0x0C)
+ tp->ic_min_sync = 0x19;
+ else if (tp->ic_min_sync <= 0x19)
+ tp->ic_min_sync *= 2;
+ else {
+ tp->ic_min_sync = 255;
+ cmd->ic_nego_sync = 0;
+ tp->maxoffs = 0;
+ }
+ }
+ new_period = tp->maxoffs?tp->ic_min_sync:0;
+ new_offset = tp->maxoffs;
+ break;
+
+ case 1: /* nego. to maximum */
+ new_period = tp->maxoffs?tp->ic_min_sync:0;
+ new_offset = tp->maxoffs;
+ break;
+
+ case 0: /* nego to async */
+ default:
+ new_period = 0;
+ new_offset = 0;
+ break;
+ };
+
+
+ nego = NS_NOCHANGE;
+ if (tp->ppr_negotiation) {
+ u_char options_byte = 0;
+
+ /*
+ ** Must make sure data is consistent.
+ ** If period is 9 and sync, must be wide and DT bit set.
+ ** else period must be larger. If the width is 0,
+ ** reset bus to wide but increase the period to 0x0A.
+ ** Note: The strange else clause is due to the integrity check.
+ ** If it fails at 0x09, wide, the I.C. code will redo at the same
+ ** speed but a narrow bus. The driver must take care of slowing
+ ** the bus speed down.
+ **
+ ** The maximum offset in ST mode is 31, in DT mode 62 (1010/1010_66 only)
+ */
+ if ( (new_period==0x09) && new_offset) {
+ if (new_width)
+ options_byte = 0x02;
+ else {
+ tp->ic_min_sync = 0x0A;
+ new_period = 0x0A;
+ cmd->ic_nego_width = 1;
+ new_width = 1;
+ new_offset &= 0x1f;
+ }
+ }
+ else if (new_period > 0x09)
+ new_offset &= 0x1f;
+
+ nego = NS_PPR;
+
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 6;
+ msgptr[msglen++] = M_X_PPR_REQ;
+ msgptr[msglen++] = new_period;
+ msgptr[msglen++] = 0;
+ msgptr[msglen++] = new_offset;
+ msgptr[msglen++] = new_width;
+ msgptr[msglen++] = options_byte;
+
+ }
+ else {
+ switch (cmd->ic_nego & ~NS_PPR) {
+ case NS_WIDE:
+ /*
+ ** WDTR negotiation on if device supports
+ ** wide or if wide device forced narrow
+ ** due to a parity error.
+ */
+
+ cmd->ic_nego_width &= tp->ic_max_width;
+
+ if (tp->ic_max_width | np->check_integ_par) {
+ nego = NS_WIDE;
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 2;
+ msgptr[msglen++] = M_X_WIDE_REQ;
+ msgptr[msglen++] = new_width;
+ }
+ break;
+
+ case NS_SYNC:
+ /*
+ ** negotiate synchronous transfers
+ ** Target must support sync transfers.
+ ** Min. period = 0x0A, maximum offset of 31=0x1f.
+ */
+
+ if (tp->inq_byte7 & INQ7_SYNC) {
+
+ if (new_offset && (new_period < 0x0A)) {
+ tp->ic_min_sync = 0x0A;
+ new_period = 0x0A;
+ }
+ nego = NS_SYNC;
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 3;
+ msgptr[msglen++] = M_X_SYNC_REQ;
+ msgptr[msglen++] = new_period;
+ msgptr[msglen++] = new_offset & 0x1f;
+ }
+ else
+ cmd->ic_nego_sync = 0;
+ break;
+
+ case NS_NOCHANGE:
+ break;
+ }
+ }
+
+ };
+
+ cp->nego_status = nego;
+ np->check_integ_par = 0;
+
+ if (nego) {
+ tp->nego_cp = cp;
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, nego == NS_WIDE ?
+ "wide/narrow msgout":
+ (nego == NS_SYNC ? "sync/async msgout" : "ppr msgout"),
+ msgptr);
+ };
+ };
+
+ return msglen;
+}
+#endif /* SCSI_NCR_INTEGRITY_CHECKING */
+
+/*==========================================================
+**
+**
+** Prepare the next negotiation message if needed.
+**
+** Fill in the part of message buffer that contains the
+** negotiation and the nego_status field of the CCB.
+** Returns the size of the message in bytes.
+**
+**
+**==========================================================
+*/
+
+
+/*
+** Build the next transfer negotiation message (PPR, SDTR or WDTR)
+** for CCB 'cp' into 'msgptr'.  Records the chosen negotiation type
+** in cp->nego_status and marks 'cp' as the negotiating CCB of the
+** target (tp->nego_cp).  Returns the message length in bytes
+** (0 when no negotiation is required).
+*/
+static int ncr_prepare_nego(ncb_p np, ccb_p cp, u_char *msgptr)
+{
+	tcb_p tp = &np->target[cp->target];
+	int msglen = 0;
+	int nego = 0;
+	u_char width, offset, factor, last_byte;
+
+	if (!np->check_integrity) {
+		/* If integrity checking disabled, enable PPR messaging
+		 * if device supports wide, sync and ultra 3
+		 */
+		/* ppr_negotiation states (as used here): 0 = do not use
+		 * PPR, 1 = attempt PPR, 2 = PPR already succeeded —
+		 * assumed from the transitions below; confirm against
+		 * ncr_ic_nego().
+		 */
+		if (tp->ppr_negotiation == 1) /* PPR message successful */
+			tp->ppr_negotiation = 2;
+
+		if ((tp->inq_done) && (!tp->ic_maximums_set)) {
+			tp->ic_maximums_set = 1;
+
+			/*
+			 * Issue PPR only if board is capable
+			 * and set-up for Ultra3 transfers.
+			 */
+			tp->ppr_negotiation = 0;
+			if ( (np->features & FE_ULTRA3) &&
+				(tp->usrwide) && (tp->maxoffs) &&
+				(tp->minsync == 0x09) )
+				tp->ppr_negotiation = 1;
+		}
+	}
+
+	if (tp->inq_done) {
+		/*
+		 * Get the current width, offset and period
+		 */
+		ncr_get_xfer_info( np, tp, &factor,
+				&offset, &width);
+
+		/*
+		**	negotiate wide transfers ?
+		*/
+
+		if (!tp->widedone) {
+			if (tp->inq_byte7 & INQ7_WIDE16) {
+				if (tp->ppr_negotiation)
+					nego = NS_PPR;
+				else
+					nego = NS_WIDE;
+
+				width = tp->usrwide;
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+				if (tp->ic_done)
+		       			width &= tp->ic_max_width;
+#endif
+			} else
+				tp->widedone=1;
+
+		};
+
+		/*
+		**	negotiate synchronous transfers?
+		*/
+
+		if ((nego != NS_WIDE) && !tp->period) {
+			if (tp->inq_byte7 & INQ7_SYNC) {
+				if (tp->ppr_negotiation)
+					nego = NS_PPR;
+				else
+					nego = NS_SYNC;
+
+				/* Check for async flag */
+				if (tp->maxoffs == 0) {
+					offset = 0;
+					factor = 0;
+				}
+				else {
+					offset = tp->maxoffs;
+					factor = tp->minsync;
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+					if ((tp->ic_done) &&
+						 	(factor < tp->ic_min_sync))
+						factor = tp->ic_min_sync;
+#endif
+				}
+
+			} else {
+				offset = 0;
+				factor = 0;
+				tp->period  =0xffff;
+				PRINT_TARGET(np, cp->target);
+				printk ("target did not report SYNC.\n");
+			};
+		};
+	};
+
+	switch (nego) {
+	case NS_PPR:
+		/*
+		** Must make sure data is consistent.
+		** If period is 9 and sync, must be wide and DT bit set
+		** else period must be larger.
+		** Maximum offset is 31=0x1f is ST mode, 62 if DT mode
+		*/
+		last_byte = 0;		/* PPR protocol options byte */
+		if ( (factor==9) && offset) {
+			if (!width) {
+				factor = 0x0A;
+				offset &= 0x1f;
+			}
+			else
+				last_byte = 0x02;	/* 0x02: DT clocking, per comment above */
+		}
+		else if (factor > 0x09)
+			offset &= 0x1f;
+
+		msgptr[msglen++] = M_EXTENDED;
+		msgptr[msglen++] = 6;
+		msgptr[msglen++] = M_X_PPR_REQ;
+		msgptr[msglen++] = factor;
+		msgptr[msglen++] = 0;
+		msgptr[msglen++] = offset;
+		msgptr[msglen++] = width;
+		msgptr[msglen++] = last_byte;
+		break;
+	case NS_SYNC:
+		/*
+		** Never negotiate faster than Ultra 2 (25ns periods)
+		*/
+		if (offset && (factor < 0x0A)) {
+			factor = 0x0A;
+			tp->minsync = 0x0A;
+		}
+
+		msgptr[msglen++] = M_EXTENDED;
+		msgptr[msglen++] = 3;
+		msgptr[msglen++] = M_X_SYNC_REQ;
+		msgptr[msglen++] = factor;
+		msgptr[msglen++] = offset & 0x1f;
+		break;
+	case NS_WIDE:
+		msgptr[msglen++] = M_EXTENDED;
+		msgptr[msglen++] = 2;
+		msgptr[msglen++] = M_X_WIDE_REQ;
+		msgptr[msglen++] = width;
+		break;
+	};
+
+	cp->nego_status = nego;
+
+	if (nego) {
+		/* Only one CCB per target may negotiate at a time. */
+		tp->nego_cp = cp;
+		if (DEBUG_FLAGS & DEBUG_NEGO) {
+			ncr_print_msg(cp, nego == NS_WIDE ?
+					"wide msgout":
+				(nego == NS_SYNC ? "sync msgout" : "ppr msgout"),
+				msgptr);
+		};
+	};
+
+	return msglen;
+}
+
+/*==========================================================
+**
+**
+** Start execution of a SCSI command.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+/*
+** Queue a command coming from the generic SCSI driver.
+** Builds the IDENTIFY/tag/negotiation message and the data
+** descriptors, fills in the CCB and inserts it into the chip's
+** start queue.  Returns a DID_* code: DID_OK when queued (possibly
+** parked on the driver's waiting list during a reset), otherwise
+** DID_BAD_TARGET or DID_ERROR.
+*/
+static int ncr_queue_command (ncb_p np, Scsi_Cmnd *cmd)
+{
+/*	Scsi_Device        *device    = cmd->device; */
+	tcb_p tp                      = &np->target[cmd->target];
+	lcb_p lp		      = ncr_lp(np, tp, cmd->lun);
+	ccb_p cp;
+
+	u_char	idmsg, *msgptr;
+	u_int   msglen;
+	int	direction;
+	u_int32	lastp, goalp;
+
+	/*---------------------------------------------
+	**
+	**      Some shortcuts ...
+	**
+	**---------------------------------------------
+	*/
+	if ((cmd->target == np->myaddr	  ) ||
+		(cmd->target >= MAX_TARGET) ||
+		(cmd->lun    >= MAX_LUN   )) {
+		return(DID_BAD_TARGET);
+	}
+
+	/*---------------------------------------------
+	**
+	**	Complete the 1st TEST UNIT READY command
+	**	with error condition if the device is
+	**	flagged NOSCAN, in order to speed up
+	**	the boot.
+	**
+	**---------------------------------------------
+	*/
+	if (cmd->cmnd[0] == 0 && (tp->usrflag & UF_NOSCAN)) {	/* 0x00 = TEST UNIT READY */
+		tp->usrflag &= ~UF_NOSCAN;
+		return DID_BAD_TARGET;
+	}
+
+	if (DEBUG_FLAGS & DEBUG_TINY) {
+		PRINT_ADDR(cmd);
+		printk ("CMD=%x ", cmd->cmnd[0]);
+	}
+
+	/*---------------------------------------------------
+	**
+	**	Assign a ccb / bind cmd.
+	**	If resetting, shorten settle_time if necessary
+	**	in order to avoid spurious timeouts.
+	**	If resetting or no free ccb,
+	**	insert cmd into the waiting list.
+	**
+	**----------------------------------------------------
+	*/
+	if (np->settle_time && cmd->timeout_per_command >= HZ) {
+		u_long tlimit = ktime_get(cmd->timeout_per_command - HZ);
+		if (ktime_dif(np->settle_time, tlimit) > 0)
+			np->settle_time = tlimit;
+	}
+
+	if (np->settle_time || !(cp=ncr_get_ccb (np, cmd->target, cmd->lun))) {
+		insert_into_waiting_list(np, cmd);
+		return(DID_OK);
+	}
+	cp->cmd = cmd;
+
+	/*---------------------------------------------------
+	**
+	**	Enable tagged queue if asked by scsi ioctl
+	**
+	**----------------------------------------------------
+	*/
+#if 0	/* This stuff was only useful for linux-1.2.13 */
+	if (lp && !lp->numtags && cmd->device && cmd->device->tagged_queue) {
+		lp->numtags = tp->usrtags;
+		ncr_setup_tags (np, cp->target, cp->lun);
+	}
+#endif
+
+	/*----------------------------------------------------
+	**
+	**	Build the identify / tag / sdtr message
+	**
+	**----------------------------------------------------
+	*/
+
+	idmsg = M_IDENTIFY | cp->lun;
+
+	/* 0x40 is the 'allow disconnect' bit of the IDENTIFY message. */
+	if (cp ->tag != NO_TAG || (lp && !(tp->usrflag & UF_NODISC)))
+		idmsg |= 0x40;
+
+	msgptr = cp->scsi_smsg;
+	msglen = 0;
+	msgptr[msglen++] = idmsg;
+
+	if (cp->tag != NO_TAG) {
+		char order = np->order;
+
+		/*
+		**	Force ordered tag if necessary to avoid timeouts
+		**	and to preserve interactivity.
+		*/
+		if (lp && ktime_exp(lp->tags_stime)) {
+			lp->tags_si = !(lp->tags_si);
+			if (lp->tags_sum[lp->tags_si]) {
+				order = M_ORDERED_TAG;
+				if ((DEBUG_FLAGS & DEBUG_TAGS)||bootverbose>0){
+					PRINT_ADDR(cmd);
+					printk("ordered tag forced.\n");
+				}
+			}
+			lp->tags_stime = ktime_get(3*HZ);
+		}
+
+		if (order == 0) {
+			/*
+			**	Ordered write ops, unordered read ops.
+			*/
+			switch (cmd->cmnd[0]) {
+			case 0x08:  /* READ_SMALL (6) */
+			case 0x28:  /* READ_BIG  (10) */
+			case 0xa8:  /* READ_HUGE (12) */
+				order = M_SIMPLE_TAG;
+				break;
+			default:
+				order = M_ORDERED_TAG;
+			}
+		}
+		msgptr[msglen++] = order;
+		/*
+		**	For less than 128 tags, actual tags are numbered
+		**	1,3,5,..2*MAXTAGS+1,since we may have to deal
+		**	with devices that have problems with #TAG 0 or too
+		**	great #TAG numbers. For more tags (up to 256),
+		**	we use directly our tag number.
+		*/
+#if MAX_TASKS > (512/4)
+		msgptr[msglen++] = cp->tag;
+#else
+		msgptr[msglen++] = (cp->tag << 1) + 1;
+#endif
+	}
+
+	cp->host_flags	= 0;
+
+	/*----------------------------------------------------
+	**
+	**	Build the data descriptors
+	**
+	**----------------------------------------------------
+	*/
+
+	direction = scsi_data_direction(cmd);
+	if (direction != SCSI_DATA_NONE) {
+		cp->segments = np->scatter (np, cp, cp->cmd);
+		if (cp->segments < 0) {
+			ncr_free_ccb(np, cp);
+			return(DID_ERROR);
+		}
+	}
+	else {
+		cp->data_len = 0;
+		cp->segments = 0;
+	}
+
+	/*---------------------------------------------------
+	**
+	**	negotiation required?
+	**
+	**	(nego_status is filled by ncr_prepare_nego())
+	**
+	**---------------------------------------------------
+	*/
+
+	cp->nego_status = 0;
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+	if ((np->check_integrity && tp->ic_done) || !np->check_integrity) {
+		 if ((!tp->widedone || !tp->period) && !tp->nego_cp && lp) {
+			msglen += ncr_prepare_nego (np, cp, msgptr + msglen);
+		 }
+	}
+	else if (np->check_integrity && (cmd->ic_in_progress)) {
+		msglen += ncr_ic_nego (np, cp, cmd, msgptr + msglen);
+	}
+	else if (np->check_integrity && cmd->ic_complete) {
+		u_long current_period;
+		u_char current_offset, current_width, current_factor;
+
+		ncr_get_xfer_info (np, tp, &current_factor,
+					&current_offset, &current_width);
+
+		tp->ic_max_width =  current_width;
+		tp->ic_min_sync  =  current_factor;
+
+		/* Map the sync factor to a period in tenths of ns
+		 * (factor 9 = 12.5ns Ultra3, 10 = 25ns, 11 = 30.3ns,
+		 * 12 = 50ns, otherwise factor * 4ns).
+		 */
+		if (current_factor == 9) current_period = 125;
+		else if (current_factor == 10) current_period = 250;
+		else if (current_factor == 11) current_period = 303;
+		else if (current_factor == 12) current_period = 500;
+		else current_period = current_factor * 40;
+
+		/*
+		 * Negotiation for this target is complete. Update flags.
+		 */
+		tp->period = current_period;
+		tp->widedone = 1;
+		tp->ic_done  = 1;
+
+		printk("%s: Integrity Check Complete: \n", ncr_name(np));
+
+		printk("%s: %s %s SCSI", ncr_name(np),
+				current_offset?"SYNC":"ASYNC",
+				tp->ic_max_width?"WIDE":"NARROW");
+		if (current_offset) {
+			u_long mbs = 10000 * (tp->ic_max_width + 1);
+
+			printk(" %d.%d  MB/s",
+				(int) (mbs / current_period), (int) (mbs % current_period));
+
+			printk(" (%d ns, %d offset)\n",
+				(int) current_period/10, current_offset);
+		}
+		else
+			printk(" %d MB/s. \n ", (tp->ic_max_width+1)*5);
+	}
+#else
+	if ((!tp->widedone || !tp->period) && !tp->nego_cp && lp) {
+		msglen += ncr_prepare_nego (np, cp, msgptr + msglen);
+	}
+#endif /* SCSI_NCR_INTEGRITY_CHECKING */
+
+
+	/*----------------------------------------------------
+	**
+	**	Determine xfer direction.
+	**
+	**----------------------------------------------------
+	*/
+	if (!cp->data_len)
+		direction = SCSI_DATA_NONE;
+
+	/*
+	**	If data direction is UNKNOWN, speculate DATA_READ
+	**	but prepare alternate pointers for WRITE in case
+	**	of our speculation will be just wrong.
+	**	SCRIPTS will swap values if needed.
+	*/
+	switch(direction) {
+	case SCSI_DATA_UNKNOWN:
+	case SCSI_DATA_WRITE:
+		goalp = NCB_SCRIPT_PHYS (np, data_out2) + 8;
+		lastp = goalp - 8 - (cp->segments * (SCR_SG_SIZE*4));
+		if (direction != SCSI_DATA_UNKNOWN)
+			break;
+		cp->phys.header.wgoalp	= cpu_to_scr(goalp);
+		cp->phys.header.wlastp	= cpu_to_scr(lastp);
+		/* fall through */
+	case SCSI_DATA_READ:
+		cp->host_flags |= HF_DATA_IN;
+		goalp = NCB_SCRIPT_PHYS (np, data_in2) + 8;
+		lastp = goalp - 8 - (cp->segments * (SCR_SG_SIZE*4));
+		break;
+	default:
+	case SCSI_DATA_NONE:
+		lastp = goalp = NCB_SCRIPTH_PHYS (np, no_data);
+		break;
+	}
+
+	/*
+	**	Set all pointers values needed by SCRIPTS.
+	**	If direction is unknown, start at data_io.
+	*/
+	cp->phys.header.lastp = cpu_to_scr(lastp);
+	cp->phys.header.goalp = cpu_to_scr(goalp);
+
+	if (direction == SCSI_DATA_UNKNOWN)
+		cp->phys.header.savep =
+			cpu_to_scr(NCB_SCRIPTH_PHYS (np, data_io));
+	else
+		cp->phys.header.savep= cpu_to_scr(lastp);
+
+	/*
+	**	Save the initial data pointer in order to be able
+	**	to redo the command.
+	**	We also have to save the initial lastp, since it
+	**	will be changed to DATA_IO if we don't know the data
+	**	direction and the device completes the command with
+	**	QUEUE FULL status (without entering the data phase).
+	*/
+	cp->startp = cp->phys.header.savep;
+	cp->lastp0 = cp->phys.header.lastp;
+
+	/*----------------------------------------------------
+	**
+	**	fill in ccb
+	**
+	**----------------------------------------------------
+	**
+	**
+	**	physical -> virtual backlink
+	**	Generic SCSI command
+	*/
+
+	/*
+	**	Startqueue
+	*/
+	cp->phys.header.go.start   = cpu_to_scr(NCB_SCRIPT_PHYS (np,select));
+	cp->phys.header.go.restart = cpu_to_scr(NCB_SCRIPT_PHYS (np,resel_dsa));
+	/*
+	**	select
+	*/
+	cp->phys.select.sel_id		= cp->target;
+	cp->phys.select.sel_scntl3	= tp->wval;
+	cp->phys.select.sel_sxfer	= tp->sval;
+	cp->phys.select.sel_scntl4	= tp->uval;
+	/*
+	**	message
+	*/
+	cp->phys.smsg.addr	= cpu_to_scr(CCB_PHYS (cp, scsi_smsg));
+	cp->phys.smsg.size	= cpu_to_scr(msglen);
+
+	/*
+	**	command
+	*/
+	memcpy(cp->cdb_buf, cmd->cmnd, MIN(cmd->cmd_len, sizeof(cp->cdb_buf)));
+	cp->phys.cmd.addr	= cpu_to_scr(CCB_PHYS (cp, cdb_buf[0]));
+	cp->phys.cmd.size	= cpu_to_scr(cmd->cmd_len);
+
+	/*
+	**	status
+	*/
+	cp->actualquirks	= tp->quirks;
+	cp->host_status		= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
+	cp->scsi_status		= S_ILLEGAL;
+	cp->xerr_status		= 0;
+	cp->extra_bytes		= 0;
+
+	/*
+	**	extreme data pointer.
+	**	shall be positive, so -1 is lower than lowest.:)
+	*/
+	cp->ext_sg  = -1;
+	cp->ext_ofs = 0;
+
+	/*----------------------------------------------------
+	**
+	**	Critical region: start this job.
+	**
+	**----------------------------------------------------
+	*/
+
+	/*
+	**	activate this job.
+	*/
+
+	/*
+	**	insert next CCBs into start queue.
+	**	2 max at a time is enough to flush the CCB wait queue.
+	*/
+	if (lp)
+		ncr_start_next_ccb(np, lp, 2);
+	else
+		ncr_put_start_queue(np, cp);
+
+	/*
+	**	Command is successfully queued.
+	*/
+
+	return(DID_OK);
+}
+
+
+/*==========================================================
+**
+**
+** Insert a CCB into the start queue and wake up the
+** SCRIPTS processor.
+**
+**
+**==========================================================
+*/
+
+/*
+** Move up to 'maxn' CCBs of logical unit 'lp' from its wait queue
+** to its busy queue and hand them to the chip's start queue,
+** honouring the lun's current queue depth (queuedccbs/queuedepth).
+*/
+static void ncr_start_next_ccb(ncb_p np, lcb_p lp, int maxn)
+{
+	XPT_QUEHEAD *qp;
+	ccb_p cp;
+
+	while (maxn-- && lp->queuedccbs < lp->queuedepth) {
+		qp = xpt_remque_head(&lp->wait_ccbq);
+		if (!qp)
+			break;
+		++lp->queuedccbs;
+		cp = xpt_que_entry(qp, struct ccb, link_ccbq);
+		xpt_insque_tail(qp, &lp->busy_ccbq);
+		/* Publish the CCB in the lun task table so SCRIPTS can
+		** find it on reselection (slot 0 for untagged commands).
+		*/
+		lp->tasktbl[cp->tag == NO_TAG ? 0 : cp->tag] =
+				cpu_to_scr(cp->p_ccb);
+		ncr_put_start_queue(np, cp);
+	}
+}
+
+/*
+** Insert one CCB into the chip's circular start queue and signal
+** the SCRIPTS processor (SIGP) so that it rescans the queue.
+*/
+static void ncr_put_start_queue(ncb_p np, ccb_p cp)
+{
+	u_short	qidx;
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+	/*
+	**	If the previously queued CCB is not yet done,
+	**	set the IARB hint. The SCRIPTS will go with IARB
+	**	for this job when starting the previous one.
+	**	We leave devices a chance to win arbitration by
+	**	not using more than 'iarb_max' consecutive
+	**	immediate arbitrations.
+	*/
+	if (np->last_cp && np->iarb_count < np->iarb_max) {
+		np->last_cp->host_flags |= HF_HINT_IARB;
+		++np->iarb_count;
+	}
+	else
+		np->iarb_count = 0;
+	np->last_cp = cp;
+#endif
+
+	/*
+	**	insert into start queue.
+	*/
+	/* Each start queue slot is two 32-bit words, so the put index
+	** advances by 2 with wrap-around.  The idle-task end marker is
+	** written into the NEXT slot before the new CCB address becomes
+	** visible (memory barrier below), so the chip never runs past
+	** the last valid entry.
+	*/
+	qidx = np->squeueput + 2;
+	if (qidx >= MAX_START*2) qidx = 0;
+
+	np->squeue [qidx]	   = cpu_to_scr(np->p_idletask);
+	MEMORY_BARRIER();
+	np->squeue [np->squeueput] = cpu_to_scr(cp->p_ccb);
+
+	np->squeueput = qidx;
+	cp->queued = 1;
+
+	if (DEBUG_FLAGS & DEBUG_QUEUE)
+		printk ("%s: queuepos=%d.\n", ncr_name (np), np->squeueput);
+
+	/*
+	**	Script processor may be waiting for reselect.
+	**	Wake it up.
+	*/
+	MEMORY_BARRIER();
+	OUTB (nc_istat, SIGP|np->istat_sem);
+}
+
+
+/*==========================================================
+**
+** Soft reset the chip.
+**
+** Some 896 and 876 chip revisions may hang-up if we set
+** the SRST (soft reset) bit at the wrong time when SCRIPTS
+** are running.
+** So, we need to abort the current operation prior to
+** soft resetting the chip.
+**
+**==========================================================
+*/
+
+/*
+** Hard (software) reset of the chip core through the ISTAT SRST bit.
+** The sequence assert / short delay / release is fixed; do not
+** reorder.
+*/
+static void ncr_chip_reset (ncb_p np)
+{
+	OUTB (nc_istat,  SRST);	/* assert software reset	*/
+	UDELAY (10);		/* let the reset take effect	*/
+	OUTB (nc_istat,  0   );	/* release reset		*/
+}
+
+/*
+** Abort the current chip operation (ISTAT CABRT) and wait for the
+** abort to complete before issuing the actual chip reset — see the
+** banner above: setting SRST while SCRIPTS are running can hang
+** some 876/896 revisions.
+*/
+static void ncr_soft_reset(ncb_p np)
+{
+	u_char istat;
+	int i;
+
+	OUTB (nc_istat, CABRT);
+	/* Poll ISTAT, bounded to avoid an endless loop if the chip
+	** never reports the aborted operation.
+	*/
+	for (i = 1000000 ; i ; --i) {
+		istat = INB (nc_istat);
+		if (istat & SIP) {
+			/* drain the pending SCSI interrupt condition */
+			INW (nc_sist);
+			continue;
+		}
+		if (istat & DIP) {
+			/* abort completed: clear CABRT, acknowledge the
+			** DMA interrupt by reading DSTAT, and stop.
+			*/
+			OUTB (nc_istat, 0);
+			INB (nc_dstat);
+			break;
+		}
+	}
+	if (!i)
+		printk("%s: unable to abort current chip operation.\n",
+			ncr_name(np));
+	ncr_chip_reset(np);
+}
+
+/*==========================================================
+**
+**
+** Start reset process.
+** The interrupt handler will reinitialize the chip.
+** The timeout handler will wait for settle_time before
+** clearing it and so resuming command processing.
+**
+**
+**==========================================================
+*/
+/*
+** Kick off the SCSI bus reset process using the configured settle
+** delay.  The return value of ncr_reset_scsi_bus() (bus-check
+** result) is deliberately ignored here.
+*/
+static void ncr_start_reset(ncb_p np)
+{
+	(void) ncr_reset_scsi_bus(np, 1, driver_setup.settle_delay);
+}
+
+/*
+** Soft reset the chip and assert SCSI bus reset (CRST).
+** Programs 'settle_delay' seconds of command-processing suspension
+** into np->settle_time.  When driver_setup.bus_check is set, the bus
+** signals are sampled while RST is asserted to detect missing
+** terminators or shorts to ground; returns 1 only when
+** bus_check == 1 and that check failed, otherwise 0.
+** 'enab_int' non-zero re-enables the RST interrupt before resetting.
+*/
+static int ncr_reset_scsi_bus(ncb_p np, int enab_int, int settle_delay)
+{
+	u_int32 term;
+	int retv = 0;
+
+	np->settle_time	= ktime_get(settle_delay * HZ);
+
+	if (bootverbose > 1)
+		printk("%s: resetting, "
+			"command processing suspended for %d seconds\n",
+			ncr_name(np), settle_delay);
+
+	ncr_soft_reset(np);	/* Soft reset the chip */
+	UDELAY (2000);	/* The 895/6 need time for the bus mode to settle */
+	if (enab_int)
+		OUTW (nc_sien, RST);
+	/*
+	**	Enable Tolerant, reset IRQD if present and
+	**	properly set IRQ mode, prior to resetting the bus.
+	*/
+	OUTB (nc_stest3, TE);
+	OUTB (nc_dcntl, (np->rv_dcntl & IRQM));
+	OUTB (nc_scntl1, CRST);
+	UDELAY (200);
+
+	if (!driver_setup.bus_check)
+		goto out;
+	/*
+	**	Check for no terminators or SCSI bus shorts to ground.
+	**	Read SCSI data bus, data parity bits and control signals.
+	**	We are expecting RESET to be TRUE and other signals to be
+	**	FALSE.
+	*/
+	/* Pack the sampled signals into one word; bit positions are
+	** documented by the expected-value printout below.
+	*/
+	term =	INB(nc_sstat0);
+	term =	((term & 2) << 7) + ((term & 1) << 17);	/* rst sdp0 */
+	term |= ((INB(nc_sstat2) & 0x01) << 26) |	/* sdp1     */
+		((INW(nc_sbdl) & 0xff)   << 9)  |	/* d7-0     */
+		((INW(nc_sbdl) & 0xff00) << 10) |	/* d15-8    */
+		INB(nc_sbcl);	/* req ack bsy sel atn msg cd io    */
+
+	if (!(np->features & FE_WIDE))
+		term &= 0x3ffff;
+
+	/* Only RST (bit 8, value 2<<7) should be active on a healthy bus. */
+	if (term != (2<<7)) {
+		printk("%s: suspicious SCSI data while resetting the BUS.\n",
+			ncr_name(np));
+		printk("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
+			"0x%lx, expecting 0x%lx\n",
+			ncr_name(np),
+			(np->features & FE_WIDE) ? "dp1,d15-8," : "",
+			(u_long)term, (u_long)(2<<7));
+		if (driver_setup.bus_check == 1)
+			retv = 1;
+	}
+out:
+	OUTB (nc_scntl1, 0);
+	return retv;
+}
+
+/*==========================================================
+**
+**
+** Reset the SCSI BUS.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+/*
+** Reset the SCSI bus on behalf of the generic driver.
+** 'cmd' is the command that triggered the reset; 'sync_reset'
+** non-zero means the mid-layer expects the command to be completed
+** synchronously (it is then completed here with DID_RESET if it was
+** not found in any driver queue).  Returns SCSI_RESET_PUNT when a
+** reset is already in progress, else SCSI_RESET_SUCCESS.
+*/
+static int ncr_reset_bus (ncb_p np, Scsi_Cmnd *cmd, int sync_reset)
+{
+/*	Scsi_Device        *device    = cmd->device; */
+	ccb_p cp;
+	int found;
+
+/*
+ * Return immediately if reset is in progress.
+ */
+	if (np->settle_time) {
+		return SCSI_RESET_PUNT;
+	}
+/*
+ * Start the reset process.
+ * The script processor is then assumed to be stopped.
+ * Commands will now be queued in the waiting list until a settle
+ * delay of 2 seconds will be completed.
+ */
+	ncr_start_reset(np);
+/*
+ * First, look in the wakeup list
+ */
+	for (found=0, cp=np->ccbc; cp; cp=cp->link_ccb) {
+		/*
+		**	look for the ccb of this command.
+		*/
+		if (cp->host_status == HS_IDLE) continue;
+		if (cp->cmd == cmd) {
+			found = 1;
+			break;
+		}
+	}
+/*
+ * Then, look in the waiting list
+ */
+	if (!found && retrieve_from_waiting_list(0, np, cmd))
+		found = 1;
+/*
+ * Wake-up all awaiting commands with DID_RESET.
+ */
+	reset_waiting_list(np);
+/*
+ * Wake-up all pending commands with HS_RESET -> DID_RESET.
+ */
+	ncr_wakeup(np, HS_RESET);
+/*
+ * If the involved command was not in a driver queue, and the
+ * scsi driver told us reset is synchronous, and the command is not
+ * currently in the waiting list, complete it with DID_RESET status,
+ * in order to keep it alive.
+ */
+	if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
+		SetScsiResult(cmd, DID_RESET, 0);
+		ncr_queue_done_cmd(np, cmd);
+	}
+
+	return SCSI_RESET_SUCCESS;
+}
+
+/*==========================================================
+**
+**
+** Abort an SCSI command.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+/*
+** Abort a SCSI command on behalf of the generic driver.
+** Returns SCSI_ABORT_SUCCESS when the command was still on the
+** driver waiting list (completed here with an abort result),
+** SCSI_ABORT_NOT_RUNNING when no active CCB matches it, and
+** SCSI_ABORT_PENDING after the CCB has been flagged for abort and
+** the SCRIPTS processor has been signaled.
+*/
+static int ncr_abort_command (ncb_p np, Scsi_Cmnd *cmd)
+{
+/*	Scsi_Device        *device    = cmd->device; */
+	ccb_p cp;
+
+/*
+ * First, look for the scsi command in the waiting list
+ */
+	if (remove_from_waiting_list(np, cmd)) {
+		SetScsiAbortResult(cmd);
+		ncr_queue_done_cmd(np, cmd);
+		return SCSI_ABORT_SUCCESS;
+	}
+
+/*
+ * Then, look in the wakeup list
+ */
+	for (cp=np->ccbc; cp; cp=cp->link_ccb) {
+		/*
+		**	look for the ccb of this command.
+		*/
+		if (cp->host_status == HS_IDLE) continue;
+		if (cp->cmd == cmd)
+			break;
+	}
+
+	if (!cp) {
+		return SCSI_ABORT_NOT_RUNNING;
+	}
+
+	/*
+	**	Keep track we have to abort this job.
+	*/
+	cp->to_abort = 1;
+
+	/*
+	**	Tell the SCRIPTS processor to stop
+	**	and synchronize with us.
+	*/
+	np->istat_sem = SEM;
+
+	/*
+	**	If there are no requests, the script
+	**	processor will sleep on SEL_WAIT_RESEL.
+	**	Let's wake it up, since it may have to work.
+	*/
+	OUTB (nc_istat, SIGP|SEM);
+
+	/*
+	**	Tell user we are working for him.
+	*/
+	return SCSI_ABORT_PENDING;
+}
+
+/*==========================================================
+**
+** Linux release module stuff.
+**
+** Called before unloading the module
+** Detach the host.
+** We have to free resources and halt the NCR chip
+**
+**==========================================================
+*/
+
+#ifdef MODULE
+/*
+** Detach the host on module unload: stop the driver timer, reset
+** the chip, restore the BIOS register settings and free all host
+** resources.  Always returns 1.
+*/
+static int ncr_detach(ncb_p np)
+{
+	int i;
+
+	printk("%s: detaching ...\n", ncr_name(np));
+
+/*
+** Stop the ncr_timeout process
+** Set release_stage to 1 and wait that ncr_timeout() set it to 2.
+*/
+	np->release_stage = 1;
+	/* wait up to 50 * 100ms = 5 seconds for the timer to notice */
+	for (i = 50 ; i && np->release_stage != 2 ; i--) MDELAY (100);
+	if (np->release_stage != 2)
+		printk("%s: the timer seems to be already stopped\n",
+			ncr_name(np));
+	/* NOTE(review): this assignment is a no-op — the else branch is
+	** only taken when release_stage is already 2.
+	*/
+	else np->release_stage = 2;
+
+/*
+** Reset NCR chip.
+** We should use ncr_soft_reset(), but we do not want to do
+** so, since we may not be safe if interrupts occur.
+*/
+
+	printk("%s: resetting chip\n", ncr_name(np));
+	ncr_chip_reset(np);
+
+/*
+** Restore bios setting for automatic clock detection.
+*/
+	OUTB(nc_dmode,	np->sv_dmode);
+	OUTB(nc_dcntl,	np->sv_dcntl);
+	OUTB(nc_ctest3,	np->sv_ctest3);
+	OUTB(nc_ctest4,	np->sv_ctest4);
+	OUTB(nc_ctest5,	np->sv_ctest5);
+	OUTB(nc_gpcntl,	np->sv_gpcntl);
+	OUTB(nc_stest2,	np->sv_stest2);
+
+	ncr_selectclock(np, np->sv_scntl3);
+/*
+** Free host resources
+*/
+	ncr_free_resources(np);
+
+	return 1;
+}
+#endif
+
+/*==========================================================
+**
+**
+** Complete execution of a SCSI command.
+** Signal completion to the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+
+/*
+** Complete execution of a SCSI command: translate the CCB's
+** host/scsi status into a driver result, free the CCB, restart
+** any commands waiting for this lun or controller, and hand the
+** command back to the generic driver.
+*/
+void ncr_complete (ncb_p np, ccb_p cp)
+{
+	Scsi_Cmnd *cmd;
+	tcb_p tp;
+	lcb_p lp;
+
+	/*
+	**	Sanity check
+	*/
+	if (!cp || !cp->cmd)
+		return;
+
+	/*
+	**	Print some debugging info.
+	*/
+
+	if (DEBUG_FLAGS & DEBUG_TINY)
+		printk ("CCB=%lx STAT=%x/%x\n", (unsigned long)cp,
+			cp->host_status,cp->scsi_status);
+
+	/*
+	**	Get command, target and lun pointers.
+	*/
+
+	/* Detach the command from the CCB first; the CCB may be reused
+	** once we free it below.
+	*/
+	cmd = cp->cmd;
+	cp->cmd = NULL;
+	tp = &np->target[cp->target];
+	lp = ncr_lp(np, tp, cp->lun);
+
+	/*
+	**	We do not queue more than 1 ccb per target
+	**	with negotiation at any time. If this ccb was
+	**	used for negotiation, clear this info in the tcb.
+	*/
+
+	if (cp == tp->nego_cp)
+		tp->nego_cp = 0;
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+	/*
+	**	We just complete the last queued CCB.
+	**	Clear this info that is no more relevant.
+	*/
+	if (cp == np->last_cp)
+		np->last_cp = 0;
+#endif
+
+	/*
+	**	If auto-sense performed, change scsi status,
+	**	Otherwise, compute the residual.
+	*/
+	if (cp->host_flags & HF_AUTO_SENSE) {
+		cp->scsi_status = cp->sv_scsi_status;
+		cp->xerr_status = cp->sv_xerr_status;
+	}
+	else {
+		cp->resid = 0;
+		if (cp->xerr_status ||
+		    cp->phys.header.lastp != cp->phys.header.goalp)
+			cp->resid = ncr_compute_residual(np, cp);
+	}
+
+	/*
+	**	Check for extended errors.
+	*/
+
+	if (cp->xerr_status) {
+		if (cp->xerr_status & XE_PARITY_ERR) {
+			PRINT_ADDR(cmd);
+			printk ("unrecovered SCSI parity error.\n");
+		}
+		if (cp->xerr_status & XE_EXTRA_DATA) {
+			PRINT_ADDR(cmd);
+			printk ("extraneous data discarded.\n");
+		}
+		if (cp->xerr_status & XE_BAD_PHASE) {
+			PRINT_ADDR(cmd);
+			printk ("illegal scsi phase (4/5).\n");
+		}
+		if (cp->xerr_status & XE_SODL_UNRUN) {
+			PRINT_ADDR(cmd);
+			printk ("ODD transfer in DATA OUT phase.\n");
+		}
+		if (cp->xerr_status & XE_SWIDE_OVRUN){
+			PRINT_ADDR(cmd);
+			printk ("ODD transfer in DATA IN phase.\n");
+		}
+
+		/* Extended errors downgrade an apparently complete
+		** command to a failure.
+		*/
+		if (cp->host_status==HS_COMPLETE)
+			cp->host_status = HS_FAIL;
+	}
+
+	/*
+	**	Print out any error for debugging purpose.
+	*/
+	if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
+		if (cp->host_status!=HS_COMPLETE || cp->scsi_status!=S_GOOD ||
+		    cp->resid) {
+			PRINT_ADDR(cmd);
+			printk ("ERROR: cmd=%x host_status=%x scsi_status=%x "
+				"data_len=%d residual=%d\n",
+				cmd->cmnd[0], cp->host_status, cp->scsi_status,
+				cp->data_len, cp->resid);
+		}
+	}
+
+#if   LINUX_VERSION_CODE >= LinuxVersionCode(2,3,99)
+	/*
+	**	Move residual byte count to user structure.
+	*/
+	cmd->resid = cp->resid;
+#endif
+	/*
+	**	Check the status.
+	*/
+	if (   (cp->host_status == HS_COMPLETE)
+		&& (cp->scsi_status == S_GOOD ||
+		    cp->scsi_status == S_COND_MET)) {
+		/*
+		**	All went well (GOOD status).
+		**	CONDITION MET status is returned on
+		**	`Pre-Fetch' or `Search data' success.
+		*/
+		SetScsiResult(cmd, DID_OK, cp->scsi_status);
+
+		/*
+		**	Allocate the lcb if not yet.
+		*/
+		if (!lp)
+			ncr_alloc_lcb (np, cp->target, cp->lun);
+
+		/*
+		**	On standard INQUIRY response (EVPD and CmDt
+		**	not set), setup logical unit according to
+		**	announced capabilities (we need the first 7 bytes).
+		*/
+		/* 0x12 is the INQUIRY opcode. */
+		if (cmd->cmnd[0] == 0x12 && !(cmd->cmnd[1] & 0x3) &&
+		    cmd->cmnd[4] >= 7 && !cmd->use_sg) {
+			sync_scsi_data(np, cmd);	/* SYNC the data */
+			ncr_setup_lcb (np, cp->target, cp->lun,
+				       (char *) cmd->request_buffer);
+		}
+
+		/*
+		**	If tags was reduced due to queue full,
+		**	increase tags if 1000 good status received.
+		*/
+		if (lp && lp->usetags && lp->numtags < lp->maxtags) {
+			++lp->num_good;
+			if (lp->num_good >= 1000) {
+				lp->num_good = 0;
+				++lp->numtags;
+				ncr_setup_tags (np, cp->target, cp->lun);
+			}
+		}
+	} else if ((cp->host_status == HS_COMPLETE)
+		&& (cp->scsi_status == S_CHECK_COND)) {
+		/*
+		**   Check condition code
+		*/
+		SetScsiResult(cmd, DID_OK, S_CHECK_COND);
+
+		if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
+			PRINT_ADDR(cmd);
+			ncr_printl_hex("sense data:", cmd->sense_buffer, 14);
+		}
+	} else if ((cp->host_status == HS_COMPLETE)
+		&& (cp->scsi_status == S_CONFLICT)) {
+		/*
+		**   Reservation Conflict condition code
+		*/
+		SetScsiResult(cmd, DID_OK, S_CONFLICT);
+
+	} else if ((cp->host_status == HS_COMPLETE)
+		&& (cp->scsi_status == S_BUSY ||
+		    cp->scsi_status == S_QUEUE_FULL)) {
+
+		/*
+		**   Target is busy.
+		*/
+		SetScsiResult(cmd, DID_OK, cp->scsi_status);
+
+	} else if ((cp->host_status == HS_SEL_TIMEOUT)
+		|| (cp->host_status == HS_TIMEOUT)) {
+
+		/*
+		**   No response
+		*/
+		SetScsiResult(cmd, DID_TIME_OUT, cp->scsi_status);
+
+	} else if (cp->host_status == HS_RESET) {
+
+		/*
+		**   SCSI bus reset
+		*/
+		SetScsiResult(cmd, DID_RESET, cp->scsi_status);
+
+	} else if (cp->host_status == HS_ABORTED) {
+
+		/*
+		**   Transfer aborted
+		*/
+		SetScsiAbortResult(cmd);
+
+	} else {
+		int did_status;
+
+		/*
+		**  Other protocol messes
+		*/
+		PRINT_ADDR(cmd);
+		printk ("COMMAND FAILED (%x %x) @%p.\n",
+			cp->host_status, cp->scsi_status, cp);
+
+		did_status = DID_ERROR;
+		if (cp->xerr_status & XE_PARITY_ERR)
+			did_status = DID_PARITY;
+
+		SetScsiResult(cmd, did_status, cp->scsi_status);
+	}
+
+	/*
+	**	trace output
+	*/
+
+	if (tp->usrflag & UF_TRACE) {
+		PRINT_ADDR(cmd);
+		printk (" CMD:");
+		ncr_print_hex(cmd->cmnd, cmd->cmd_len);
+
+		if (cp->host_status==HS_COMPLETE) {
+			switch (cp->scsi_status) {
+			case S_GOOD:
+				printk ("  GOOD");
+				break;
+			case S_CHECK_COND:
+				printk ("  SENSE:");
+				ncr_print_hex(cmd->sense_buffer, 14);
+				break;
+			default:
+				printk ("  STAT: %x\n", cp->scsi_status);
+				break;
+			}
+		} else printk ("  HOSTERROR: %x", cp->host_status);
+		printk ("\n");
+	}
+
+	/*
+	**	Free this ccb
+	*/
+	ncr_free_ccb (np, cp);
+
+	/*
+	**	requeue awaiting scsi commands for this lun.
+	*/
+	if (lp && lp->queuedccbs < lp->queuedepth &&
+	    !xpt_que_empty(&lp->wait_ccbq))
+		ncr_start_next_ccb(np, lp, 2);
+
+	/*
+	**	requeue awaiting scsi commands for this controller.
+	*/
+	if (np->waiting_list)
+		requeue_waiting_list(np);
+
+	/*
+	**	signal completion to generic driver.
+	*/
+	ncr_queue_done_cmd(np, cmd);
+}
+
+/*==========================================================
+**
+**
+** Signal all (or one) control block done.
+**
+**
+**==========================================================
+*/
+
+/*
+** The NCR has completed CCBs.
+** Look at the DONE QUEUE.
+**
+** On architectures that may reorder LOAD/STORE operations,
+** a memory barrier may be needed after the reading of the
+** so-called `flag' and prior to dealing with the data.
+*/
+/*
+** Scan the done queue for CCBs that the chip has completed and
+** complete them.  Returns the number of CCBs processed.
+** Done queue entries are two 32-bit words apart; a zero DSA value
+** marks the end of valid entries.
+*/
+int ncr_wakeup_done (ncb_p np)
+{
+	ccb_p cp;
+	int i, n;
+	u_long dsa;
+
+	n = 0;
+	i = np->dqueueget;
+	while (1) {
+		dsa = scr_to_cpu(np->dqueue[i]);
+		if (!dsa)
+			break;
+		/* Consume the entry and advance with wrap-around. */
+		np->dqueue[i] = 0;
+		if ((i = i+2) >= MAX_START*2)
+			i = 0;
+
+		cp = ncr_ccb_from_dsa(np, dsa);
+		if (cp) {
+			/* See banner above: barrier before reading the
+			** CCB data the chip wrote.
+			*/
+			MEMORY_BARRIER();
+			ncr_complete (np, cp);
+			++n;
+		}
+		else
+			printk (KERN_ERR "%s: bad DSA (%lx) in done queue.\n",
+				ncr_name(np), dsa);
+	}
+	np->dqueueget = i;
+
+	return n;
+}
+
+/*
+** Complete all active CCBs.
+*/
+/*
+** Complete all active CCBs with host status 'code'
+** (e.g. HS_RESET), walking the controller's CCB linked list.
+** Idle CCBs are left untouched.
+*/
+void ncr_wakeup (ncb_p np, u_long code)
+{
+	ccb_p cp = np->ccbc;
+
+	while (cp) {
+		/* Save the link first? Not needed: ncr_complete() frees
+		** the CCB back to the pool but the link_ccb chain of the
+		** controller list is preserved — assumed from ncr_free_ccb;
+		** confirm against the CCB allocator.
+		*/
+		if (cp->host_status != HS_IDLE) {
+			cp->host_status = code;
+			ncr_complete (np, cp);
+		}
+		cp = cp->link_ccb;
+	}
+}
+
+/*==========================================================
+**
+**
+** Start NCR chip.
+**
+**
+**==========================================================
+*/
+
+void ncr_init (ncb_p np, int reset, char * msg, u_long code)
+{
+ int i;
+ u_long phys;
+
+ /*
+ ** Reset chip if asked, otherwise just clear fifos.
+ */
+
+ if (reset)
+ ncr_soft_reset(np);
+ else {
+ OUTB (nc_stest3, TE|CSF);
+ OUTONB (nc_ctest3, CLF);
+ }
+
+ /*
+ ** Message.
+ */
+
+ if (msg) printk (KERN_INFO "%s: restart (%s).\n", ncr_name (np), msg);
+
+ /*
+ ** Clear Start Queue
+ */
+ phys = np->p_squeue;
+ np->queuedepth = MAX_START - 1; /* 1 entry needed as end marker */
+ for (i = 0; i < MAX_START*2; i += 2) {
+ np->squeue[i] = cpu_to_scr(np->p_idletask);
+ np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
+ }
+ np->squeue[MAX_START*2-1] = cpu_to_scr(phys);
+
+
+ /*
+ ** Start at first entry.
+ */
+ np->squeueput = 0;
+ np->scripth0->startpos[0] = cpu_to_scr(phys);
+
+ /*
+ ** Clear Done Queue
+ */
+ phys = vtobus(np->dqueue);
+ for (i = 0; i < MAX_START*2; i += 2) {
+ np->dqueue[i] = 0;
+ np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
+ }
+ np->dqueue[MAX_START*2-1] = cpu_to_scr(phys);
+
+ /*
+ ** Start at first entry.
+ */
+ np->scripth0->done_pos[0] = cpu_to_scr(phys);
+ np->dqueueget = 0;
+
+ /*
+ ** Wakeup all pending jobs.
+ */
+ ncr_wakeup (np, code);
+
+ /*
+ ** Init chip.
+ */
+
+ OUTB (nc_istat, 0x00 ); /* Remove Reset, abort */
+ UDELAY (2000); /* The 895 needs time for the bus mode to settle */
+
+ OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
+ /* full arb., ena parity, par->ATN */
+ OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! */
+
+ ncr_selectclock(np, np->rv_scntl3); /* Select SCSI clock */
+
+ OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */
+ OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */
+ OUTB (nc_istat , SIGP ); /* Signal Process */
+ OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */
+ OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */
+
+ OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */
+ OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */
+ OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */
+
+ if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66)){
+ OUTB (nc_stest2, EXT|np->rv_stest2);
+ /* Extended Sreq/Sack filtering, not supported in C1010/C1010_66 */
+ }
+ OUTB (nc_stest3, TE); /* TolerANT enable */
+ OUTB (nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */
+
+ /*
+ ** DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
+ ** Disable overlapped arbitration for all dual-function
+ ** devices, regardless revision id.
+ ** We may consider it is a post-chip-design feature. ;-)
+ **
+ ** Errata applies to all 896 and 1010 parts.
+ */
+ if (np->device_id == PCI_DEVICE_ID_NCR_53C875)
+ OUTB (nc_ctest0, (1<<5));
+ else if (np->device_id == PCI_DEVICE_ID_NCR_53C896 ||
+ np->device_id == PCI_DEVICE_ID_LSI_53C1010 ||
+ np->device_id == PCI_DEVICE_ID_LSI_53C1010_66 )
+ np->rv_ccntl0 |= DPR;
+
+ /*
+ ** C1010_66MHz rev 0 part requies AIPCNTL1 bit 3 to be set.
+ */
+ if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)
+ OUTB(nc_aipcntl1, (1<<3));
+
+ /*
+ ** If 64 bit (895A/896/1010/1010_66) write the CCNTL1 register to
+ ** enable 40 bit address table indirect addressing for MOVE.
+ ** Also write CCNTL0 if 64 bit chip, since this register seems
+ ** to only be used by 64 bit cores.
+ */
+ if (np->features & FE_64BIT) {
+ OUTB (nc_ccntl0, np->rv_ccntl0);
+ OUTB (nc_ccntl1, np->rv_ccntl1);
+ }
+
+ /*
+ ** If phase mismatch handled by scripts (53C895A or 53C896
+ ** or 53C1010 or 53C1010_66), set PM jump addresses.
+ */
+
+ if (np->features & FE_NOPM) {
+ printk(KERN_INFO "%s: handling phase mismatch from SCRIPTS.\n",
+ ncr_name(np));
+ OUTL (nc_pmjad1, NCB_SCRIPTH_PHYS (np, pm_handle));
+ OUTL (nc_pmjad2, NCB_SCRIPTH_PHYS (np, pm_handle));
+ }
+
+ /*
+ ** Enable GPIO0 pin for writing if LED support from SCRIPTS.
+ ** Also set GPIO5 and clear GPIO6 if hardware LED control.
+ */
+
+ if (np->features & FE_LED0)
+ OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01);
+ else if (np->features & FE_LEDC)
+ OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20);
+
+
+ /*
+ ** enable ints
+ */
+
+ OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
+ OUTB (nc_dien , MDPE|BF|SSI|SIR|IID);
+
+ /*
+ ** For 895/895A/896/c1010
+ ** Enable SBMC interrupt and save current SCSI bus mode.
+ */
+ if ( (np->features & FE_ULTRA2) || (np->features & FE_ULTRA3) ) {
+ OUTONW (nc_sien, SBMC);
+ np->scsi_mode = INB (nc_stest4) & SMODE;
+ }
+
+ /*
+ ** Fill in target structure.
+ ** Reinitialize usrsync.
+ ** Reinitialize usrwide.
+ ** Prepare sync negotiation according to actual SCSI bus mode.
+ */
+
+ for (i=0;i<MAX_TARGET;i++) {
+ tcb_p tp = &np->target[i];
+
+ tp->to_reset = 0;
+
+ tp->sval = 0;
+ tp->wval = np->rv_scntl3;
+ tp->uval = np->rv_scntl4;
+
+ if (tp->usrsync != 255) {
+ if (tp->usrsync <= np->maxsync) {
+ if (tp->usrsync < np->minsync) {
+ tp->usrsync = np->minsync;
+ }
+ }
+ else
+ tp->usrsync = 255;
+ };
+
+ if (tp->usrwide > np->maxwide)
+ tp->usrwide = np->maxwide;
+
+ ncr_negotiate (np, tp);
+ }
+
+ /*
+ ** Download SCSI SCRIPTS to on-chip RAM if present,
+ ** and start script processor.
+ ** We do the download preferently from the CPU.
+ ** For platforms that may not support PCI memory mapping,
+ ** we use a simple SCRIPTS that performs MEMORY MOVEs.
+ */
+ if (np->base2_ba) {
+ if (bootverbose)
+ printk ("%s: Downloading SCSI SCRIPTS.\n",
+ ncr_name(np));
+#ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ if (np->base2_ws == 8192)
+ phys = NCB_SCRIPTH0_PHYS (np, start_ram64);
+ else
+ phys = NCB_SCRIPTH_PHYS (np, start_ram);
+#else
+ if (np->base2_ws == 8192) {
+ memcpy_to_pci(np->base2_va + 4096,
+ np->scripth0, sizeof(struct scripth));
+ OUTL (nc_mmws, np->scr_ram_seg);
+ OUTL (nc_mmrs, np->scr_ram_seg);
+ OUTL (nc_sfs, np->scr_ram_seg);
+ phys = NCB_SCRIPTH_PHYS (np, start64);
+ }
+ else
+ phys = NCB_SCRIPT_PHYS (np, init);
+ memcpy_to_pci(np->base2_va, np->script0, sizeof(struct script));
+#endif /* SCSI_NCR_PCI_MEM_NOT_SUPPORTED */
+ }
+ else
+ phys = NCB_SCRIPT_PHYS (np, init);
+
+ np->istat_sem = 0;
+
+ OUTL (nc_dsa, np->p_ncb);
+ OUTL_DSP (phys);
+}
+
+/*==========================================================
+**
+** Prepare the negotiation values for wide and
+** synchronous transfers.
+**
+**==========================================================
+*/
+
+static void ncr_negotiate (struct ncb* np, struct tcb* tp)
+{
+	/*
+	**	Requested synchronous factor for this target.
+	**	minsync unit is 4 ns !
+	*/
+	u_long per = tp->usrsync;
+
+	/*
+	**	SCSI bus mode limit: in SE mode never allow a factor
+	**	faster than 12.
+	*/
+	if (np->scsi_mode && np->scsi_mode == SMODE_SE && per < 12)
+		per = 12;
+
+	/*
+	**	Clamp against our own fastest supported rate ...
+	*/
+	if (per < np->minsync)
+		per = np->minsync;
+
+	/*
+	**	... and fall back to asynchronous (255) when the
+	**	request is beyond what the clock dividers can do.
+	*/
+	if (per > np->maxsync)
+		per = 255;
+
+	tp->minsync = per;
+	tp->maxoffs = (per < 255) ? np->maxoffs : 0;
+
+	/*
+	**	period == 0 and widedone == 0 force a fresh sync
+	**	and wide negotiation with the device.
+	*/
+	tp->period   = 0;
+	tp->widedone = 0;
+}
+
+/*==========================================================
+**
+** Get clock factor and sync divisor for a given
+** synchronous factor period.
+** Returns the clock factor (in sxfer) and scntl3
+** synchronous divisor field.
+**
+**==========================================================
+*/
+
+/*
+**	Compute the sxfer clock factor (*fakp) and the scntl3 divisor
+**	field (*scntl3p) for a given synchronous factor period `sfac`.
+**
+**	Fix: the original unconditionally wrote `*fakp = fak - 4;`
+**	before the final if/else, which overwrites it in both branches;
+**	that dead store (and a disabled #if 0 optimization block) have
+**	been removed.  No behavioral change.
+*/
+static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p)
+{
+	u_long	clk = np->clock_khz;	/* SCSI clock frequency in kHz	*/
+	int	div = np->clock_divn;	/* Number of divisors supported	*/
+	u_long	fak;			/* Sync factor in sxfer		*/
+	u_long	per;			/* Period in tenths of ns	*/
+	u_long	kpc;			/* (per * clk)			*/
+
+	/*
+	**	Compute the synchronous period in tenths of nano-seconds
+	**	from sfac.
+	**
+	**	Note, if sfac == 9, DT is being used.  Double the period
+	**	of 125 to 250.
+	*/
+	if	(sfac <= 10)	per = 250;
+	else if	(sfac == 11)	per = 303;
+	else if	(sfac == 12)	per = 500;
+	else			per = 40 * sfac;
+
+	/*
+	**	Look for the greatest clock divisor that allows an
+	**	input speed faster than the period.
+	**
+	**	NOTE(review): if no divisor satisfies the condition,
+	**	div underflows to -1 and div_10M[div] below indexes out
+	**	of bounds; presumably callers guarantee kpc is large
+	**	enough -- verify against chip attach code.
+	*/
+	kpc = per * clk;
+	while (--div >= 0)
+		if (kpc >= (div_10M[div] << 2)) break;
+
+	/*
+	**	Calculate the lowest clock factor that allows an output
+	**	speed not faster than the period.
+	*/
+	fak = (kpc - 1) / div_10M[div] + 1;
+
+	if (fak < 4) fak = 4;	/* Should never happen, too bad ... */
+
+	/*
+	**	Compute and return sync parameters for the ncr.
+	**
+	**	If sfac < 25, and 8xx parts, desire that the chip operate
+	**	at least at Ultra speeds.  Must set bit 7 of scntl3.
+	**	For C1010, do not set this bit.  If operating at Ultra3
+	**	speeds, set the U3EN bit instead.
+	*/
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+		(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+		*scntl3p = (div+1) << 4;
+		*fakp = 0;
+	}
+	else {
+		*scntl3p = ((div+1) << 4) + (sfac < 25 ? 0x80 : 0);
+		*fakp = fak - 4;
+	}
+}
+
+/*==========================================================
+**
+** Utility routine to return the current bus width
+** synchronous period and offset.
+** Utilizes target sval, wval and uval
+**
+**==========================================================
+*/
+static void ncr_get_xfer_info(ncb_p np, tcb_p tp, u_char *factor,
+					u_char *offset, u_char *width)
+{
+	u_char divsel;
+	u_long per;
+	int is_c1010 = (np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+		       (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66);
+
+	/* Wide flag comes from the EWS bit of the scntl3 shadow. */
+	*width = (tp->wval & EWS) ? 1 : 0;
+
+	/* C1010 parts keep a 6-bit sync offset, older ones only 5 bits. */
+	*offset = is_c1010 ? (tp->sval & 0x3f) : (tp->sval & 0x1f);
+
+	/*
+	**	Recover the negotiated period from the saved register
+	**	shadows (sval/wval/uval); see ncr_setsync for how they
+	**	were derived.  Clock divisor index lives in wval bits 6-4.
+	*/
+	divsel = (tp->wval >> 4) & 0x07;
+
+	per = 0xffff;	/* asynchronous unless offset and divisor valid */
+	if (*offset && divsel) {
+		if (is_c1010) {
+			/* uval bit 7 set means DT clocking (ST when clear). */
+			if (tp->uval & 0x80)
+				per = (2*div_10M[divsel-1])/np->clock_khz;
+			else
+				per = (4*div_10M[divsel-1])/np->clock_khz;
+		}
+		else
+			per = (((tp->sval>>5)+4)*div_10M[divsel-1])/np->clock_khz;
+	}
+
+	/* Map the period (tenths of ns) back onto a SCSI sync factor. */
+	if	(per <= 125)	*factor = 9;
+	else if	(per <= 250)	*factor = 10;
+	else if	(per <= 303)	*factor = 11;
+	else if	(per <= 500)	*factor = 12;
+	else			*factor = (per + 40 - 1) / 40;
+}
+
+
+/*==========================================================
+**
+** Set actual values, sync status and patch all ccbs of
+** a target according to new sync/wide agreement.
+**
+**==========================================================
+*/
+
+/*
+**	Write the negotiated sync/wide register values for `target`
+**	to the chip and patch all busy CCBs of that target so future
+**	selections use the new agreement.
+**
+**	Fix: removed the dead initializer of `cp` (the for loop
+**	re-initializes it) and a stray `;` after the loop body.
+*/
+static void ncr_set_sync_wide_status (ncb_p np, u_char target)
+{
+	ccb_p cp;
+	tcb_p tp = &np->target[target];
+
+	/*
+	**	set actual value and sync_status
+	**
+	**	TEMP register contains current scripts address
+	**	which is data type/direction/dependent.
+	*/
+	OUTB (nc_sxfer, tp->sval);
+	OUTB (nc_scntl3, tp->wval);
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+		(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+		OUTB (nc_scntl4, tp->uval);	/* C1010 only register */
+
+	/*
+	**	patch ALL ccbs of this target.
+	*/
+	for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+		if (cp->host_status == HS_IDLE)
+			continue;		/* not in flight */
+		if (cp->target != target)
+			continue;		/* other target */
+		cp->phys.select.sel_scntl3 = tp->wval;
+		cp->phys.select.sel_sxfer  = tp->sval;
+		if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+			(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+			cp->phys.select.sel_scntl4 = tp->uval;
+	}
+}
+
+/*==========================================================
+**
+** Switch sync mode for current job and it's target
+**
+**==========================================================
+*/
+
+static void ncr_setsync (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer,
+					u_char scntl4)
+{
+	tcb_p tp;
+	u_char target = INB (nc_sdid) & 0x0f;	/* currently selected target */
+	u_char idiv;				/* clock divisor index */
+	u_char offset;				/* negotiated sync offset */
+
+	assert (cp);
+	if (!cp) return;
+
+	assert (target == (cp->target & 0xf));
+
+	tp = &np->target[target];
+
+	/*
+	**	Build the register images from the negotiated values.
+	**	C1010 parts: 6-bit offset, DT enable kept in scntl4 bit 7.
+	**	Older parts: 5-bit offset, no scntl4; fall back to the
+	**	reset value of scntl3 when async or unset.
+	*/
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+		(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+		offset = sxfer & 0x3f;	/* bits 5-0 */
+		scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS);
+		scntl4 = (scntl4 & 0x80);
+	}
+	else {
+		offset = sxfer & 0x1f;	/* bits 4-0 */
+		if (!scntl3 || !offset)
+			scntl3 = np->rv_scntl3;
+
+		scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS) |
+			(np->rv_scntl3 & 0x07);
+	}
+
+
+	/*
+	**	Deduce the value of controller sync period from scntl3.
+	**	period is in tenths of nano-seconds.
+	*/
+
+	idiv = ((scntl3 >> 4) & 0x7);
+	if ( offset && idiv) {
+		if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+			(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+			/* Note: If extra data hold clocks are used,
+			 * the formulas below must be modified.
+			 * When scntl4 == 0, ST mode.
+			 */
+			if (scntl4 & 0x80)
+				tp->period = (2*div_10M[idiv-1])/np->clock_khz;
+			else
+				tp->period = (4*div_10M[idiv-1])/np->clock_khz;
+		}
+		else
+			tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz;
+	}
+	else
+		tp->period = 0xffff;	/* asynchronous */
+
+
+	/*
+	**	Stop there if sync parameters are unchanged
+	*/
+	if (tp->sval == sxfer && tp->wval == scntl3 && tp->uval == scntl4) return;
+	tp->sval = sxfer;
+	tp->wval = scntl3;
+	tp->uval = scntl4;
+
+	/*
+	**	Bells and whistles ;-)
+	**	Do not announce negotiations due to auto-sense,
+	**	unless the user really wants us to be verbose. :)
+	*/
+	if ( bootverbose < 2 && (cp->host_flags & HF_AUTO_SENSE))
+		goto next;
+	PRINT_TARGET(np, target);
+	if (offset) {
+		/* f10 = 10 * kHz; widedone > 1 means 16-bit wide bus. */
+		unsigned f10 = 100000 << (tp->widedone ? tp->widedone -1 : 0);
+		unsigned mb10 = (f10 + tp->period/2) / tp->period;
+		char *scsi;
+
+		/*
+		**	Disable extended Sreq/Sack filtering
+		**	(not present on C1010 parts).
+		*/
+		if ((tp->period <= 2000) &&
+			(np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+			(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+			OUTOFFB (nc_stest2, EXT);
+
+		/*
+		**	Bells and whistles ;-)
+		*/
+		if	(tp->period < 250)	scsi = "FAST-80";
+		else if	(tp->period < 500)	scsi = "FAST-40";
+		else if	(tp->period < 1000)	scsi = "FAST-20";
+		else if	(tp->period < 2000)	scsi = "FAST-10";
+		else				scsi = "FAST-5";
+
+		printk ("%s %sSCSI %d.%d MB/s (%d ns, offset %d)\n", scsi,
+			tp->widedone > 1 ? "WIDE " : "",
+			mb10 / 10, mb10 % 10, tp->period / 10, offset);
+	} else
+		printk ("%sasynchronous.\n", tp->widedone > 1 ? "wide " : "");
+next:
+	/*
+	**	set actual value and sync_status
+	**	patch ALL ccbs of this target.
+	*/
+	ncr_set_sync_wide_status(np, target);
+}
+
+
+/*==========================================================
+**
+** Switch wide mode for current job and it's target
+** SCSI specs say: a SCSI device that accepts a WDTR
+** message shall reset the synchronous agreement to
+** asynchronous mode.
+**
+**==========================================================
+*/
+
+static void ncr_setwide (ncb_p np, ccb_p cp, u_char wide, u_char ack)
+{
+	u_short target = INB (nc_sdid) & 0x0f;	/* selected target id */
+	tcb_p tp;
+	u_char new_wval;
+	u_char new_sval;
+
+	assert (cp);
+	if (!cp) return;
+
+	assert (target == (cp->target & 0xf));
+
+	tp = &np->target[target];
+	tp->widedone = wide+1;
+
+	/* New scntl3 image: everything preserved except the EWS bit. */
+	new_wval = (tp->wval & (~EWS)) | (wide ? EWS : 0);
+
+	/*
+	**	Per the SCSI spec, accepting a WDTR message resets the
+	**	synchronous agreement to asynchronous mode.
+	*/
+	new_sval = ack ? 0 : tp->sval;
+
+	/* Nothing to do when sync/wide parameters are unchanged. */
+	if (tp->sval == new_sval && tp->wval == new_wval)
+		return;
+	tp->sval = new_sval;
+	tp->wval = new_wval;
+
+	/* Announce the new agreement when verbose. */
+	if (bootverbose >= 2) {
+		PRINT_TARGET(np, target);
+		if (new_wval & EWS)
+			printk ("WIDE SCSI (16 bit) enabled.\n");
+		else
+			printk ("WIDE SCSI disabled.\n");
+	}
+
+	/* Commit to chip registers and patch all busy CCBs. */
+	ncr_set_sync_wide_status(np, target);
+}
+
+
+/*==========================================================
+**
+** Switch sync/wide mode for current job and it's target
+** PPR negotiations only
+**
+**==========================================================
+*/
+
+static void ncr_setsyncwide (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer,
+					u_char scntl4, u_char wide)
+{
+	tcb_p tp;
+	u_char target = INB (nc_sdid) & 0x0f;	/* currently selected target */
+	u_char idiv;				/* clock divisor index */
+	u_char offset;				/* negotiated sync offset */
+
+	assert (cp);
+	if (!cp) return;
+
+	assert (target == (cp->target & 0xf));
+
+	tp = &np->target[target];
+	tp->widedone = wide+1;
+
+	/*
+	**	Build register images; same derivation as ncr_setsync,
+	**	but the wide bit comes from the PPR `wide` argument
+	**	instead of the current wval shadow.
+	*/
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+		(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+		offset = sxfer & 0x3f;	/* bits 5-0 */
+		scntl3 = (scntl3 & 0xf0) | (wide ? EWS : 0);
+		scntl4 = (scntl4 & 0x80);
+	}
+	else {
+		offset = sxfer & 0x1f;	/* bits 4-0 */
+		if (!scntl3 || !offset)
+			scntl3 = np->rv_scntl3;
+
+		scntl3 = (scntl3 & 0xf0) | (wide ? EWS : 0) |
+			(np->rv_scntl3 & 0x07);
+	}
+
+
+	/*
+	**	Deduce the value of controller sync period from scntl3.
+	**	period is in tenths of nano-seconds.
+	*/
+
+	idiv = ((scntl3 >> 4) & 0x7);
+	if ( offset && idiv) {
+		if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+			(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+			/* Note: If extra data hold clocks are used,
+			 * the formulas below must be modified.
+			 * When scntl4 == 0, ST mode.
+			 */
+			if (scntl4 & 0x80)
+				tp->period = (2*div_10M[idiv-1])/np->clock_khz;
+			else
+				tp->period = (4*div_10M[idiv-1])/np->clock_khz;
+		}
+		else
+			tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz;
+	}
+	else
+		tp->period = 0xffff;	/* asynchronous */
+
+
+	/*
+	**	Stop there if sync parameters are unchanged
+	*/
+	if (tp->sval == sxfer && tp->wval == scntl3 && tp->uval == scntl4) return;
+	tp->sval = sxfer;
+	tp->wval = scntl3;
+	tp->uval = scntl4;
+
+	/*
+	**	Bells and whistles ;-)
+	**	Do not announce negotiations due to auto-sense,
+	**	unless the user really wants us to be verbose. :)
+	*/
+	if ( bootverbose < 2 && (cp->host_flags & HF_AUTO_SENSE))
+		goto next;
+	PRINT_TARGET(np, target);
+	if (offset) {
+		/* f10 = 10 * kHz; widedone > 1 means 16-bit wide bus. */
+		unsigned f10 = 100000 << (tp->widedone ? tp->widedone -1 : 0);
+		unsigned mb10 = (f10 + tp->period/2) / tp->period;
+		char *scsi;
+
+		/*
+		**	Disable extended Sreq/Sack filtering
+		**	(not present on C1010 parts).
+		*/
+		if ((tp->period <= 2000) &&
+			(np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+			(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+			OUTOFFB (nc_stest2, EXT);
+
+		/*
+		**	Bells and whistles ;-)
+		*/
+		if	(tp->period < 250)	scsi = "FAST-80";
+		else if	(tp->period < 500)	scsi = "FAST-40";
+		else if	(tp->period < 1000)	scsi = "FAST-20";
+		else if	(tp->period < 2000)	scsi = "FAST-10";
+		else				scsi = "FAST-5";
+
+		printk ("%s %sSCSI %d.%d MB/s (%d ns, offset %d)\n", scsi,
+			tp->widedone > 1 ? "WIDE " : "",
+			mb10 / 10, mb10 % 10, tp->period / 10, offset);
+	} else
+		printk ("%sasynchronous.\n", tp->widedone > 1 ? "wide " : "");
+next:
+	/*
+	**	set actual value and sync_status
+	**	patch ALL ccbs of this target.
+	*/
+	ncr_set_sync_wide_status(np, target);
+}
+
+
+
+
+/*==========================================================
+**
+** Switch tagged mode for a target.
+**
+**==========================================================
+*/
+
+static void ncr_setup_tags (ncb_p np, u_char tn, u_char ln)
+{
+	tcb_p tp = &np->target[tn];
+	lcb_p lp = ncr_lp(np, tp, ln);
+	u_short want, cap;
+
+	/* Just in case ... */
+	if (!tp || !lp)
+		return;
+
+	/* SCSI device queue depth not yet known: leave here. */
+	if (!lp->scdev_depth)
+		return;
+
+	/*
+	**	Cap the tag count by what the SCSI driver can queue
+	**	for this device and by what we can handle ourselves.
+	*/
+	cap = lp->scdev_depth;
+	if (cap > lp->maxnxs)
+		cap = lp->maxnxs;
+	if (lp->maxtags > cap)
+		lp->maxtags = cap;
+	if (lp->numtags > cap)
+		lp->numtags = cap;
+
+	/*
+	**	Tagged commands only if the device advertises command
+	**	queueing (inquiry byte 7) and more than one tag is
+	**	enabled by the user.
+	*/
+	want = ((lp->inq_byte7 & INQ7_QUEUE) && lp->numtags > 1) ?
+		lp->numtags : 1;
+
+	/* Remember the maximum tag count reached so far. */
+	lp->numtags = want;
+	if (lp->numtags > lp->maxtags)
+		lp->maxtags = lp->numtags;
+
+	/*
+	**	An actual tag-mode switch must wait until no CCB is
+	**	active for the logical unit.
+	*/
+	if (want > 1 && lp->usetags) {		/* Stay in tagged mode */
+		if (lp->queuedepth == want)	/* Already announced */
+			return;
+		lp->queuedepth = want;
+	}
+	else if (want <= 1 && !lp->usetags) {	/* Stay in untagged mode */
+		lp->queuedepth = want;
+		return;
+	}
+	else {					/* Want to switch tag mode */
+		if (lp->busyccbs)		/* If not yet safe, return */
+			return;
+		lp->queuedepth = want;
+		lp->usetags = (want > 1) ? 1 : 0;
+	}
+
+	/* Patch the lun mini-script, according to tag mode. */
+	lp->resel_task = lp->usetags ?
+		cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_tag)) :
+		cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_notag));
+
+	/* Announce change to user. */
+	if (bootverbose) {
+		PRINT_LUN(np, tn, ln);
+		if (lp->usetags)
+			printk("tagged command queue depth set to %d\n", want);
+		else
+			printk("tagged command queueing disabled\n");
+	}
+}
+
+/*----------------------------------------------------
+**
+** handle user commands
+**
+**----------------------------------------------------
+*/
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+
+/*
+**	Execute a pending user command (set via the driver's user
+**	interface) against the controller and/or selected targets.
+*/
+static void ncr_usercmd (ncb_p np)
+{
+	u_char t;
+	tcb_p tp;
+	int ln;
+	u_long size;
+
+	switch (np->user.cmd) {
+	case 0: return;		/* no command pending */
+
+	case UC_SETDEBUG:
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+		ncr_debug = np->user.data;
+#endif
+		break;
+
+	case UC_SETORDER:
+		/* tag ordering policy */
+		np->order = np->user.data;
+		break;
+
+	case UC_SETVERBOSE:
+		np->verbose = np->user.data;
+		break;
+
+	default:
+		/*
+		**	We assume that other commands apply to targets.
+		**	This should always be the case and avoid the below
+		**	4 lines to be repeated 5 times.
+		*/
+		/* np->user.target is a bitmask of target ids. */
+		for (t = 0; t < MAX_TARGET; t++) {
+			if (!((np->user.target >> t) & 1))
+				continue;
+			tp = &np->target[t];
+
+			switch (np->user.cmd) {
+
+			case UC_SETSYNC:
+				/* new sync factor; renegotiate */
+				tp->usrsync = np->user.data;
+				ncr_negotiate (np, tp);
+				break;
+
+			case UC_SETWIDE:
+				/* clamp requested width to chip maximum */
+				size = np->user.data;
+				if (size > np->maxwide)
+					size=np->maxwide;
+				tp->usrwide = size;
+				ncr_negotiate (np, tp);
+				break;
+
+			case UC_SETTAGS:
+				/* apply new tag count to every lun */
+				tp->usrtags = np->user.data;
+				for (ln = 0; ln < MAX_LUN; ln++) {
+					lcb_p lp;
+					lp = ncr_lp(np, tp, ln);
+					if (!lp)
+						continue;
+					lp->numtags = np->user.data;
+					lp->maxtags = lp->numtags;
+					ncr_setup_tags (np, t, ln);
+				}
+				break;
+
+			case UC_RESETDEV:
+				/* flag target, then signal SCRIPTS (SIGP) */
+				tp->to_reset = 1;
+				np->istat_sem = SEM;
+				OUTB (nc_istat, SIGP|SEM);
+				break;
+
+			case UC_CLEARDEV:
+				/* flag all luns for clearing, then signal */
+				for (ln = 0; ln < MAX_LUN; ln++) {
+					lcb_p lp;
+					lp = ncr_lp(np, tp, ln);
+					if (lp)
+						lp->to_clear = 1;
+				}
+				np->istat_sem = SEM;
+				OUTB (nc_istat, SIGP|SEM);
+				break;
+
+			case UC_SETFLAG:
+				tp->usrflag = np->user.data;
+				break;
+			}
+		}
+		break;
+	}
+	/* Mark the command consumed. */
+	np->user.cmd=0;
+}
+#endif
+
+/*==========================================================
+**
+**
+** ncr timeout handler.
+**
+**
+**==========================================================
+**
+** Misused to keep the driver running when
+** interrupts are not configured correctly.
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_timeout (ncb_p np)
+{
+	/* NOTE(review): ktime_get() here is a driver-local time helper,
+	 * not the mainline kernel ktime_get(); presumably it returns a
+	 * jiffies-based value offset by its argument -- verify. */
+	u_long	thistime = ktime_get(0);
+
+	/*
+	**	If release process in progress, let's go
+	**	Set the release stage from 1 to 2 to synchronize
+	**	with the release process.
+	*/
+
+	if (np->release_stage) {
+		if (np->release_stage == 1) np->release_stage = 2;
+		return;
+	}
+
+	/* Re-arm ourselves; shorter interval when interrupts are broken. */
+#ifdef SCSI_NCR_PCIQ_BROKEN_INTR
+	np->timer.expires = ktime_get((HZ+9)/10);
+#else
+	np->timer.expires = ktime_get(SCSI_NCR_TIMER_INTERVAL);
+#endif
+	add_timer(&np->timer);
+
+	/*
+	**	If we are resetting the ncr, wait for settle_time before
+	**	clearing it. Then command processing will be resumed.
+	*/
+	if (np->settle_time) {
+		if (np->settle_time <= thistime) {
+			if (bootverbose > 1)
+				printk("%s: command processing resumed\n", ncr_name(np));
+			np->settle_time = 0;
+			requeue_waiting_list(np);
+		}
+		return;
+	}
+
+	/*
+	**	Nothing to do for now, but that may come.
+	*/
+	if (np->lasttime + 4*HZ < thistime) {
+		np->lasttime = thistime;
+	}
+
+#ifdef SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS
+	/*
+	**	Some way-broken PCI bridges may lead to
+	**	completions being lost when the clearing
+	**	of the INTFLY flag by the CPU occurs
+	**	concurrently with the chip raising this flag.
+	**	If this ever happen, lost completions will
+	**	be reaped here.
+	*/
+	ncr_wakeup_done(np);
+#endif
+
+#ifdef SCSI_NCR_PCIQ_BROKEN_INTR
+	if (INB(nc_istat) & (INTF|SIP|DIP)) {
+
+		/*
+		**	Process pending interrupts.
+		*/
+		if (DEBUG_FLAGS & DEBUG_TINY) printk ("{");
+		ncr_exception (np);
+		if (DEBUG_FLAGS & DEBUG_TINY) printk ("}");
+	}
+#endif /* SCSI_NCR_PCIQ_BROKEN_INTR */
+}
+
+/*==========================================================
+**
+** log message for real hard errors
+**
+** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)."
+** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf."
+**
+** exception register:
+** ds: dstat
+** si: sist
+**
+** SCSI bus lines:
+** so: control lines as driven by NCR.
+** si: control lines as seen by NCR.
+** sd: scsi data lines as seen by NCR.
+**
+** wide/fastmode:
+** sxfer: (see the manual)
+** scntl3: (see the manual)
+**
+** current script command:
+** dsp: script address (relative to start of script).
+** dbc: first word of script command.
+**
+** First 24 register of the chip:
+** r0..rf
+**
+**==========================================================
+*/
+
+static void ncr_log_hard_error(ncb_p np, u_short sist, u_char dstat)
+{
+	u_int32	dsp;
+	int	script_ofs;
+	int	script_size;
+	char	*script_name;
+	u_char	*script_base;
+	int	i;
+
+	dsp = INL (nc_dsp);
+
+	/*
+	**	Classify the current SCRIPTS address: main script,
+	**	script header, or arbitrary memory.
+	*/
+	if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) {
+		script_ofs	= dsp - np->p_script;
+		script_size	= sizeof(struct script);
+		script_base	= (u_char *) np->script0;
+		script_name	= "script";
+	}
+	else if (np->p_scripth < dsp &&
+		 dsp <= np->p_scripth + sizeof(struct scripth)) {
+		script_ofs	= dsp - np->p_scripth;
+		script_size	= sizeof(struct scripth);
+		script_base	= (u_char *) np->scripth0;
+		script_name	= "scripth";
+	} else {
+		script_ofs	= dsp;
+		script_size	= 0;
+		script_base	= 0;
+		script_name	= "mem";
+	}
+
+	printk ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
+		ncr_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
+		(unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl),
+		(unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs,
+		(unsigned)INL (nc_dbc));
+
+	/* Dump the faulting script word when the offset looks valid. */
+	if (((script_ofs & 3) == 0) &&
+	    (unsigned)script_ofs < script_size) {
+		printk ("%s: script cmd = %08x\n", ncr_name(np),
+			scr_to_cpu((int) *(ncrcmd *)(script_base + script_ofs)));
+	}
+
+	/* Dump the first 24 chip registers. */
+	printk ("%s: regdump:", ncr_name(np));
+	for (i=0; i<24;i++)
+		printk (" %02x", (unsigned)INB_OFF(i));
+	printk (".\n");
+}
+
+/*============================================================
+**
+** ncr chip exception handler.
+**
+**============================================================
+**
+** In normal situations, interrupt conditions occur one at
+** a time. But when something bad happens on the SCSI BUS,
+** the chip may raise several interrupt flags before
+** stopping and interrupting the CPU. The additionnal
+** interrupt flags are stacked in some extra registers
+** after the SIP and/or DIP flag has been raised in the
+** ISTAT. After the CPU has read the interrupt condition
+** flag from SIST or DSTAT, the chip unstacks the other
+** interrupt flags and sets the corresponding bits in
+** SIST or DSTAT. Since the chip starts stacking once the
+** SIP or DIP flag is set, there is a small window of time
+** where the stacking does not occur.
+**
+** Typically, multiple interrupt conditions may happen in
+** the following situations:
+**
+** - SCSI parity error + Phase mismatch (PAR|MA)
+** When an parity error is detected in input phase
+** and the device switches to msg-in phase inside a
+** block MOV.
+** - SCSI parity error + Unexpected disconnect (PAR|UDC)
+** When a stupid device does not want to handle the
+** recovery of an SCSI parity error.
+** - Some combinations of STO, PAR, UDC, ...
+** When using non compliant SCSI stuff, when user is
+** doing non compliant hot tampering on the BUS, when
+** something really bad happens to a device, etc ...
+**
+** The heuristic suggested by SYMBIOS to handle
+** multiple interrupts is to try unstacking all
+** interrupts conditions and to handle them on some
+** priority based on error severity.
+** This will work when the unstacking has been
+** successful, but we cannot be 100 % sure of that,
+** since the CPU may have been faster to unstack than
+** the chip is able to stack. Hmmm ... But it seems that
+** such a situation is very unlikely to happen.
+**
+**	If this happens -- for example an STO caught by the CPU and
+**	then a UDC happening before the CPU has restarted the
+**	SCRIPTS -- the driver may wrongly complete the
+** same command on UDC, since the SCRIPTS didn't restart
+** and the DSA still points to the same command.
+** We avoid this situation by setting the DSA to an
+** invalid value when the CCB is completed and before
+** restarting the SCRIPTS.
+**
+** Another issue is that we need some section of our
+** recovery procedures to be somehow uninterruptible and
+** that the SCRIPTS processor does not provides such a
+** feature. For this reason, we handle recovery preferently
+** from the C code and check against some SCRIPTS
+** critical sections from the C code.
+**
+** Hopefully, the interrupt handling of the driver is now
+** able to resist to weird BUS error conditions, but donnot
+** ask me for any guarantee that it will never fail. :-)
+** Use at your own decision and risk.
+**
+**============================================================
+*/
+
+void ncr_exception (ncb_p np)
+{
+	u_char	istat, istatc;
+	u_char	dstat;
+	u_short	sist;
+	int	i;
+
+	/*
+	**	interrupt on the fly ?
+	**
+	**	A `dummy read' is needed to ensure that the
+	**	clear of the INTF flag reaches the device
+	**	before the scanning of the DONE queue.
+	*/
+	istat = INB (nc_istat);
+	if (istat & INTF) {
+		OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem);
+		istat = INB (nc_istat);		/* DUMMY READ */
+		if (DEBUG_FLAGS & DEBUG_TINY) printk ("F ");
+		(void)ncr_wakeup_done (np);
+	};
+
+	if (!(istat & (SIP|DIP)))
+		return;		/* nothing else pending */
+
+#if 0	/* We should never get this one */
+	if (istat & CABRT)
+		OUTB (nc_istat, CABRT);
+#endif
+
+	/*
+	**	Steinbach's Guideline for Systems Programming:
+	**	Never test for an error condition you don't know how to handle.
+	*/
+
+	/*========================================================
+	**	PAR and MA interrupts may occur at the same time,
+	**	and we need to know of both in order to handle
+	**	this situation properly. We try to unstack SCSI
+	**	interrupts for that reason. BTW, I dislike a LOT
+	**	such a loop inside the interrupt routine.
+	**	Even if DMA interrupt stacking is very unlikely to
+	**	happen, we also try unstacking these ones, since
+	**	this has no performance impact.
+	**=========================================================
+	*/
+	sist	= 0;
+	dstat	= 0;
+	istatc	= istat;
+	do {
+		if (istatc & SIP)
+			sist  |= INW (nc_sist);		/* unstack SCSI ints */
+		if (istatc & DIP)
+			dstat |= INB (nc_dstat);	/* unstack DMA ints */
+		istatc = INB (nc_istat);
+		istat |= istatc;
+	} while (istatc & (SIP|DIP));
+
+	if (DEBUG_FLAGS & DEBUG_TINY)
+		printk ("<%d|%x:%x|%x:%x>",
+			(int)INB(nc_scr0),
+			dstat,sist,
+			(unsigned)INL(nc_dsp),
+			(unsigned)INL(nc_dbc));
+
+	/*
+	**	On paper, a memory barrier may be needed here.
+	**	And since we are paranoid ... :)
+	*/
+	MEMORY_BARRIER();
+
+	/*========================================================
+	**	First, interrupts we want to service cleanly.
+	**
+	**	Phase mismatch (MA) is the most frequent interrupt
+	**	for chip earlier than the 896 and so we have to service
+	**	it as quickly as possible.
+	**	A SCSI parity error (PAR) may be combined with a phase
+	**	mismatch condition (MA).
+	**	Programmed interrupts (SIR) are used to call the C code
+	**	from SCRIPTS.
+	**	The single step interrupt (SSI) is not used in this
+	**	driver.
+	**=========================================================
+	*/
+
+	if (!(sist  & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
+	    !(dstat & (MDPE|BF|ABRT|IID))) {
+		if	(sist & PAR)	ncr_int_par (np, sist);
+		else if (sist & MA)	ncr_int_ma (np);
+		else if (dstat & SIR)	ncr_int_sir (np);
+		else if (dstat & SSI)	OUTONB_STD ();
+		else			goto unknown_int;
+		return;
+	};
+
+	/*========================================================
+	**	Now, interrupts that donnot happen in normal
+	**	situations and that we may need to recover from.
+	**
+	**	On SCSI RESET (RST), we reset everything.
+	**	On SCSI BUS MODE CHANGE (SBMC), we complete all
+	**	active CCBs with RESET status, prepare all devices
+	**	for negotiating again and restart the SCRIPTS.
+	**	On STO and UDC, we complete the CCB with the corres-
+	**	ponding status and restart the SCRIPTS.
+	**=========================================================
+	*/
+
+	if (sist & RST) {
+		ncr_init (np, 1, bootverbose ? "scsi reset" : NULL, HS_RESET);
+		return;
+	};
+
+	OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
+	OUTB (nc_stest3, TE|CSF);		/* clear scsi fifo */
+
+	if (!(sist  & (GEN|HTH|SGE)) &&
+	    !(dstat & (MDPE|BF|ABRT|IID))) {
+		if	(sist & SBMC)	ncr_int_sbmc (np);
+		else if	(sist & STO)	ncr_int_sto (np);
+		else if	(sist & UDC)	ncr_int_udc (np);
+		else			goto unknown_int;
+		return;
+	};
+
+	/*=========================================================
+	**	Now, interrupts we are not able to recover cleanly.
+	**
+	**	Do the register dump.
+	**	Log message for hard errors.
+	**	Reset everything.
+	**=========================================================
+	*/
+	/* Rate-limit the register dump to one every 10 seconds. */
+	if (ktime_exp(np->regtime)) {
+		np->regtime = ktime_get(10*HZ);
+		for (i = 0; i<sizeof(np->regdump); i++)
+			((char*)&np->regdump)[i] = INB_OFF(i);
+		np->regdump.nc_dstat = dstat;
+		np->regdump.nc_sist  = sist;
+	};
+
+	ncr_log_hard_error(np, sist, dstat);
+
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+		(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+		u_char ctest4_o, ctest4_m;
+		u_char shadow;
+
+		/*
+		 * Get shadow register data
+		 * Write 1 to ctest4
+		 */
+		ctest4_o = INB(nc_ctest4);
+
+		OUTB(nc_ctest4, ctest4_o | 0x10);
+
+		ctest4_m = INB(nc_ctest4);
+		shadow = INW_OFF(0x42);
+
+		OUTB(nc_ctest4, ctest4_o);	/* restore original value */
+
+		printk("%s: ctest4/sist original 0x%x/0x%X  mod: 0x%X/0x%x\n",
+			ncr_name(np), ctest4_o, sist, ctest4_m, shadow);
+	}
+
+	if ((sist & (GEN|HTH|SGE)) ||
+		(dstat & (MDPE|BF|ABRT|IID))) {
+		ncr_start_reset(np);
+		return;
+	};
+
+unknown_int:
+	/*=========================================================
+	**	We just miss the cause of the interrupt. :(
+	**	Print a message. The timeout will do the real work.
+	**=========================================================
+	*/
+	printk(	"%s: unknown interrupt(s) ignored, "
+		"ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
+		ncr_name(np), istat, dstat, sist);
+}
+
+
+/*==========================================================
+**
+** generic recovery from scsi interrupt
+**
+**==========================================================
+**
+** The doc says that when the chip gets an SCSI interrupt,
+** it tries to stop in an orderly fashion, by completing
+** an instruction fetch that had started or by flushing
+** the DMA fifo for a write to memory that was executing.
+** Such a fashion is not enough to know if the instruction
+** that was just before the current DSP value has been
+** executed or not.
+**
+** There are 3 small SCRIPTS sections that deal with the
+**	start queue and the done queue that may break any
+**	assumption made by the C code if we are interrupted
+**	inside, so we reset if it happens. Btw, since these
+** SCRIPTS sections are executed while the SCRIPTS hasn't
+** started SCSI operations, it is very unlikely to happen.
+**
+** All the driver data structures are supposed to be
+** allocated from the same 4 GB memory window, so there
+** is a 1 to 1 relationship between DSA and driver data
+** structures. Since we are careful :) to invalidate the
+** DSA when we complete a command or when the SCRIPTS
+** pushes a DSA into a queue, we can trust it when it
+** points to a CCB.
+**
+**----------------------------------------------------------
+*/
+/*
+**	Attempt soft recovery after an unexpected SCSI interrupt.
+**
+**	np:   board control block.
+**	hsts: host status code to report for the interrupted CCB, if any.
+**
+**	If the SCRIPTS processor was stopped outside the critical queue
+**	sections, complete the current CCB (when the DSA matches one),
+**	clear the fifos and restart the SCRIPTS at 'start'; otherwise
+**	fall back to a full bus reset.
+*/
+static void ncr_recover_scsi_int (ncb_p np, u_char hsts)
+{
+	u_int32	dsp	= INL (nc_dsp);
+	u_int32	dsa	= INL (nc_dsa);
+	ccb_p cp	= ncr_ccb_from_dsa(np, dsa);
+
+	/*
+	**	If we haven't been interrupted inside the SCRIPTS
+	**	critical paths, we can safely restart the SCRIPTS
+	**	and trust the DSA value if it matches a CCB.
+	*/
+	if ((!(dsp > NCB_SCRIPT_PHYS (np, getjob_begin) &&
+	       dsp < NCB_SCRIPT_PHYS (np, getjob_end) + 1)) &&
+	    (!(dsp > NCB_SCRIPT_PHYS (np, ungetjob) &&
+	       dsp < NCB_SCRIPT_PHYS (np, reselect) + 1)) &&
+	    (!(dsp > NCB_SCRIPTH_PHYS (np, sel_for_abort) &&
+	       dsp < NCB_SCRIPTH_PHYS (np, sel_for_abort_1) + 1)) &&
+	    (!(dsp > NCB_SCRIPT_PHYS (np, done) &&
+	       dsp < NCB_SCRIPT_PHYS (np, done_end) + 1))) {
+		if (cp) {
+			cp->host_status = hsts;
+			ncr_complete (np, cp);
+		}
+		/* Invalidate DSA so a stale value cannot be matched later. */
+		OUTL (nc_dsa, DSA_INVALID);
+		OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
+		OUTB (nc_stest3, TE|CSF);		/* clear scsi fifo */
+		OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
+	}
+	else
+		goto reset_all;
+
+	return;
+
+reset_all:
+	ncr_start_reset(np);
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for selection timeout
+**
+**==========================================================
+**
+** There seems to be a bug in the 53c810.
+** Although a STO-Interrupt is pending,
+** it continues executing script commands.
+** But it will fail and interrupt (IID) on
+** the next instruction where it's looking
+** for a valid phase.
+**
+**----------------------------------------------------------
+*/
+
+/*
+**	Selection timeout (STO) interrupt handler.
+**
+**	Works around the 53c810 quirk described above: if the DSP shows
+**	we stopped right after 'wf_sel_done' (the expected spot), or if
+**	soft recovery is not disabled in driver_setup, try the generic
+**	recovery path with HS_SEL_TIMEOUT; otherwise reset the bus.
+*/
+void ncr_int_sto (ncb_p np)
+{
+	u_int32 dsp	= INL (nc_dsp);
+
+	if (DEBUG_FLAGS & DEBUG_TINY) printk ("T");
+
+	if (dsp == NCB_SCRIPT_PHYS (np, wf_sel_done) + 8 ||
+	    !(driver_setup.recovery & 1))
+		ncr_recover_scsi_int(np, HS_SEL_TIMEOUT);
+	else
+		ncr_start_reset(np);
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for unexpected disconnect
+**
+**==========================================================
+**
+**----------------------------------------------------------
+*/
+/*
+**	Unexpected disconnect interrupt handler.
+**	Reports HS_UNEXPECTED through the generic recovery path.
+*/
+void ncr_int_udc (ncb_p np)
+{
+	u_int32 dsa = INL (nc_dsa);
+	ccb_p cp = ncr_ccb_from_dsa(np, dsa);
+	/*
+	** NOTE(review): ncr_ccb_from_dsa() may return NULL when the DSA
+	** does not identify a CCB; cp->target below would then dereference
+	** NULL -- confirm the chip always has a valid DSA on UDC.
+	*/
+	tcb_p tp = &np->target[cp->target];
+
+	/*
+	 * Fix Up. Some disks respond to a PPR negotiation with
+	 * a bus free instead of a message reject.
+	 * Disable ppr negotiation if this is first time
+	 * tried ppr negotiation.
+	 */
+
+	if (tp->ppr_negotiation == 1)
+		tp->ppr_negotiation = 0;
+
+	printk ("%s: unexpected disconnect\n", ncr_name(np));
+	ncr_recover_scsi_int(np, HS_UNEXPECTED);
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for SCSI bus mode change
+**
+**==========================================================
+**
+** spi2-r12 11.2.3 says a transceiver mode change must
+** generate a reset event and a device that detects a reset
+** event shall initiate a hard reset. It says also that a
+** device that detects a mode change shall set data transfer
+** mode to eight bit asynchronous, etc...
+** So, just resetting should be enough.
+**
+**
+**----------------------------------------------------------
+*/
+
+/*
+**	SCSI bus mode change (SBMC) interrupt handler.
+**
+**	Records the new transceiver mode read from STEST4, then suspends
+**	command processing for one second (settle_time) and reinitializes
+**	the driver state (everything except the chip) with HS_RESET.
+*/
+static void ncr_int_sbmc (ncb_p np)
+{
+	u_char scsi_mode = INB (nc_stest4) & SMODE;
+
+	printk("%s: SCSI bus mode change from %x to %x.\n",
+		ncr_name(np), np->scsi_mode, scsi_mode);
+
+	np->scsi_mode = scsi_mode;
+
+
+	/*
+	**	Suspend command processing for 1 second and
+	**	reinitialize all except the chip.
+	*/
+	np->settle_time	= ktime_get(1*HZ);
+	ncr_init (np, 0, bootverbose ? "scsi mode change" : NULL, HS_RESET);
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for SCSI parity error.
+**
+**==========================================================
+**
+** When the chip detects a SCSI parity error and is
+** currently executing a (CH)MOV instruction, it does
+** not interrupt immediately, but tries to finish the
+** transfer of the current scatter entry before
+** interrupting. The following situations may occur:
+**
+** - The complete scatter entry has been transferred
+** without the device having changed phase.
+** The chip will then interrupt with the DSP pointing
+** to the instruction that follows the MOV.
+**
+** - A phase mismatch occurs before the MOV finished
+** and phase errors are to be handled by the C code.
+** The chip will then interrupt with both PAR and MA
+** conditions set.
+**
+** - A phase mismatch occurs before the MOV finished and
+** phase errors are to be handled by SCRIPTS (895A or 896).
+** The chip will load the DSP with the phase mismatch
+** JUMP address and interrupt the host processor.
+**
+**----------------------------------------------------------
+*/
+
+/*
+**	SCSI parity error (PAR) interrupt handler.
+**
+**	np:   board control block.
+**	sist: SCSI interrupt status, used to detect a concurrent phase
+**	      mismatch (MA) condition.
+**
+**	Decides between recovery, message-based retry, and full bus reset
+**	according to the three situations described above.
+*/
+static void ncr_int_par (ncb_p np, u_short sist)
+{
+	u_char	hsts	= INB (HS_PRT);
+	u_int32	dsp	= INL (nc_dsp);
+	u_int32	dbc	= INL (nc_dbc);
+	u_int32	dsa	= INL (nc_dsa);
+	u_char	sbcl	= INB (nc_sbcl);
+	u_char	cmd	= dbc >> 24;	/* SCRIPTS opcode byte */
+	int phase	= cmd & 7;	/* SCSI phase encoded in the opcode */
+	ccb_p	cp	= ncr_ccb_from_dsa(np, dsa);
+
+	printk("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
+		ncr_name(np), hsts, dbc, sbcl);
+
+	/*
+	**	Check that the chip is connected to the SCSI BUS.
+	*/
+	if (!(INB (nc_scntl1) & ISCON)) {
+		if (!(driver_setup.recovery & 1)) {
+			ncr_recover_scsi_int(np, HS_FAIL);
+			return;
+		}
+		goto reset_all;
+	}
+
+	/*
+	**	If the nexus is not clearly identified, reset the bus.
+	**	We will try to do better later.
+	*/
+	if (!cp)
+		goto reset_all;
+
+	/*
+	**	Check instruction was a MOV, direction was INPUT and
+	**	ATN is asserted.
+	*/
+	if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
+		goto reset_all;
+
+	/*
+	**	Keep track of the parity error.
+	*/
+	OUTONB (HF_PRT, HF_EXT_ERR);
+	cp->xerr_status |= XE_PARITY_ERR;
+
+	/*
+	**	Prepare the message to send to the device.
+	**	MSG IN phase (7) gets MESSAGE PARITY ERROR, other input
+	**	phases get INITIATOR DETECTED ERROR.
+	*/
+	np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+	/*
+	**	Save error message. For integrity check use only.
+	*/
+	if (np->check_integrity)
+		np->check_integ_par = np->msgout[0];
+#endif
+
+	/*
+	**	If the old phase was DATA IN or DT DATA IN phase,
+	**	we have to deal with the 3 situations described above.
+	**	For other input phases (MSG IN and STATUS), the device
+	**	must resend the whole thing that failed parity checking
+	**	or signal error. So, jumping to dispatcher should be OK.
+	*/
+	if ((phase == 1) || (phase == 5)) {
+		/* Phase mismatch handled by SCRIPTS */
+		if (dsp == NCB_SCRIPTH_PHYS (np, pm_handle))
+			OUTL_DSP (dsp);
+		/* Phase mismatch handled by the C code */
+		else if (sist & MA)
+			ncr_int_ma (np);
+		/* No phase mismatch occurred */
+		else {
+			OUTL (nc_temp, dsp);
+			OUTL_DSP (NCB_SCRIPT_PHYS (np, dispatch));
+		}
+	}
+	else
+		OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+	return;
+
+reset_all:
+	ncr_start_reset(np);
+	return;
+}
+
+/*==========================================================
+**
+**
+** ncr chip exception handler for phase errors.
+**
+**
+**==========================================================
+**
+** We have to construct a new transfer descriptor,
+** to transfer the rest of the current block.
+**
+**----------------------------------------------------------
+*/
+
+/*
+**	Phase mismatch (MA) interrupt handler.
+**
+**	Computes the residual byte count of the interrupted (CH)MOV,
+**	builds a new phase-mismatch context (PM save area) so the
+**	transfer can resume where it stopped, then restarts the SCRIPTS
+**	processor.  Unexpected (non-data) phase changes are routed to
+**	'unexpected_phase' below; unrecoverable states reset the bus.
+*/
+static void ncr_int_ma (ncb_p np)
+{
+	u_int32	dbc;
+	u_int32	rest;
+	u_int32	dsp;
+	u_int32	dsa;
+	u_int32	nxtdsp;
+	u_int32	*vdsp;
+	u_int32	oadr, olen;
+	u_int32	*tblp;
+	u_int32	newcmd;
+	u_int	delta;
+	u_char	cmd;
+	u_char	hflags, hflags0;
+	struct pm_ctx *pm;
+	ccb_p	cp;
+
+	dsp	= INL (nc_dsp);
+	dbc	= INL (nc_dbc);
+	dsa	= INL (nc_dsa);
+
+	cmd	= dbc >> 24;		/* interrupted SCRIPTS opcode */
+	rest	= dbc & 0xffffff;	/* untransferred byte count */
+	delta	= 0;
+
+	/*
+	**	locate matching cp.
+	*/
+	cp = ncr_ccb_from_dsa(np, dsa);
+
+	/* NOTE(review): cp may be NULL here; the DEBUG_PHASE printk below
+	** dereferences cp->cmd before the !cp check further down. */
+	if (DEBUG_FLAGS & DEBUG_PHASE)
+		printk("CCB = %2x %2x %2x %2x %2x %2x\n",
+			cp->cmd->cmnd[0], cp->cmd->cmnd[1], cp->cmd->cmnd[2],
+			cp->cmd->cmnd[3], cp->cmd->cmnd[4], cp->cmd->cmnd[5]);
+
+	/*
+	**	Do not take into account dma fifo and various buffers in
+	**	INPUT phase since the chip flushes everything before
+	**	raising the MA interrupt for interrupted INPUT phases.
+	**	For DATA IN phase, we will check for the SWIDE later.
+	*/
+	if ((cmd & 7) != 1 && (cmd & 7) != 5) {
+		u_int32 dfifo;
+		u_char ss0, ss2;
+
+		/*
+		**  If C1010, DFBC contains number of bytes in DMA fifo.
+		**  else read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
+		*/
+		if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+			(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+			delta = INL(nc_dfbc) & 0xffff;
+		else {
+			dfifo = INL(nc_dfifo);
+
+			/*
+			**	Calculate remaining bytes in DMA fifo.
+			**	C1010 - always large fifo, value in dfbc
+			**	Otherwise, (CTEST5 = dfifo >> 16)
+			*/
+			if (dfifo & (DFS << 16))
+				delta = ((((dfifo >> 8) & 0x300) |
+					(dfifo & 0xff)) - rest) & 0x3ff;
+			else
+				delta = ((dfifo & 0xff) - rest) & 0x7f;
+
+			/*
+			**	The data in the dma fifo has not been
+			**	transferred to the target -> add the amount
+			**	to the rest and clear the data.
+			**	Check the sstat2 register in case of wide
+			**	transfer.
+			*/
+
+		}
+
+		rest += delta;
+		ss0 = INB (nc_sstat0);
+		if (ss0 & OLF) rest++;
+		if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+			(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) && (ss0 & ORF))
+			rest++;
+		if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
+			ss2 = INB (nc_sstat2);
+			if (ss2 & OLF1) rest++;
+			if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+				(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) && (ss2 & ORF))
+				rest++;
+		};
+
+		/*
+		**	Clear fifos.
+		*/
+		OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* dma fifo  */
+		OUTB (nc_stest3, TE|CSF);		/* scsi fifo */
+	}
+
+	/*
+	**	log the information
+	*/
+
+	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
+		printk ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7,
+			(unsigned) rest, (unsigned) delta);
+
+	/*
+	**	try to find the interrupted script command,
+	**	and the address at which to continue.
+	*/
+	vdsp = 0;
+	nxtdsp = 0;
+	if (dsp > np->p_script &&
+	    dsp <= np->p_script + sizeof(struct script)) {
+		vdsp = (u_int32 *)((char*)np->script0 + (dsp-np->p_script-8));
+		nxtdsp = dsp;
+	}
+	else if (dsp > np->p_scripth &&
+		 dsp <= np->p_scripth + sizeof(struct scripth)) {
+		vdsp = (u_int32 *)((char*)np->scripth0 + (dsp-np->p_scripth-8));
+		nxtdsp = dsp;
+	}
+
+	/*
+	**	log the information
+	*/
+	if (DEBUG_FLAGS & DEBUG_PHASE) {
+		printk ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
+			cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
+	};
+
+	if (!vdsp) {
+		printk ("%s: interrupted SCRIPT address not found.\n",
+			ncr_name (np));
+		goto reset_all;
+	}
+
+	if (!cp) {
+		printk ("%s: SCSI phase error fixup: CCB already dequeued.\n",
+			ncr_name (np));
+		goto reset_all;
+	}
+
+	/*
+	**	get old startaddress and old length.
+	*/
+
+	oadr = scr_to_cpu(vdsp[1]);
+
+	if (cmd & 0x10) {	/* Table indirect */
+		tblp = (u_int32 *) ((char*) &cp->phys + oadr);
+		olen = scr_to_cpu(tblp[0]);
+		oadr = scr_to_cpu(tblp[1]);
+	} else {
+		tblp = (u_int32 *) 0;
+		olen = scr_to_cpu(vdsp[0]) & 0xffffff;
+	};
+
+	if (DEBUG_FLAGS & DEBUG_PHASE) {
+		printk ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
+			(unsigned) (scr_to_cpu(vdsp[0]) >> 24),
+			tblp,
+			(unsigned) olen,
+			(unsigned) oadr);
+	};
+
+	/*
+	**	check cmd against assumed interrupted script command.
+	**	If dt data phase, the MOVE instruction hasn't bit 4 of
+	**	the phase.
+	*/
+
+	if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
+		PRINT_ADDR(cp->cmd);
+		printk ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
+			(unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);
+
+		goto reset_all;
+	};
+
+	/*
+	**	if old phase not dataphase, leave here.
+	**	C/D line is low if data.
+	*/
+
+	if (cmd & 0x02) {
+		PRINT_ADDR(cp->cmd);
+		printk ("phase change %x-%x %d@%08x resid=%d.\n",
+			cmd&7, INB(nc_sbcl)&7, (unsigned)olen,
+			(unsigned)oadr, (unsigned)rest);
+		goto unexpected_phase;
+	};
+
+	/*
+	**	Choose the correct PM save area.
+	**
+	**	Look at the PM_SAVE SCRIPT if you want to understand
+	**	this stuff. The equivalent code is implemented in
+	**	SCRIPTS for the 895A and 896 that are able to handle
+	**	PM from the SCRIPTS processor.
+	*/
+
+	hflags0 = INB (HF_PRT);
+	hflags = hflags0;
+
+	if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
+		if (hflags & HF_IN_PM0)
+			nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
+		else if (hflags & HF_IN_PM1)
+			nxtdsp = scr_to_cpu(cp->phys.pm1.ret);
+
+		if (hflags & HF_DP_SAVED)
+			hflags ^= HF_ACT_PM;
+	}
+
+	if (!(hflags & HF_ACT_PM)) {
+		pm = &cp->phys.pm0;
+		newcmd = NCB_SCRIPT_PHYS(np, pm0_data);
+	}
+	else {
+		pm = &cp->phys.pm1;
+		newcmd = NCB_SCRIPT_PHYS(np, pm1_data);
+	}
+
+	hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
+	if (hflags != hflags0)
+		OUTB (HF_PRT, hflags);
+
+	/*
+	**	fillin the phase mismatch context
+	*/
+
+	pm->sg.addr = cpu_to_scr(oadr + olen - rest);
+	pm->sg.size = cpu_to_scr(rest);
+	pm->ret     = cpu_to_scr(nxtdsp);
+
+	/*
+	**	If we have a SWIDE,
+	**	- prepare the address to write the SWIDE from SCRIPTS,
+	**	- compute the SCRIPTS address to restart from,
+	**	- move current data pointer context by one byte.
+	*/
+	nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
+	if ( ((cmd & 7) == 1 || (cmd & 7) == 5)
+		&& cp && (cp->phys.select.sel_scntl3 & EWS) &&
+	    (INB (nc_scntl2) & WSR)) {
+		u32 tmp;
+
+#ifdef SYM_DEBUG_PM_WITH_WSR
+		PRINT_ADDR(cp);
+		printf ("MA interrupt with WSR set - "
+			"pm->sg.addr=%x - pm->sg.size=%d\n",
+			pm->sg.addr, pm->sg.size);
+#endif
+		/*
+		 * Set up the table indirect for the MOVE
+		 * of the residual byte and adjust the data
+		 * pointer context.
+		 */
+		tmp = scr_to_cpu(pm->sg.addr);
+		cp->phys.wresid.addr = cpu_to_scr(tmp);
+		pm->sg.addr = cpu_to_scr(tmp + 1);
+		tmp = scr_to_cpu(pm->sg.size);
+		cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
+		pm->sg.size = cpu_to_scr(tmp - 1);
+
+		/*
+		 * If only the residual byte is to be moved,
+		 * no PM context is needed.
+		 */
+		if ((tmp&0xffffff) == 1)
+			newcmd = pm->ret;
+
+		/*
+		 * Prepare the address of SCRIPTS that will
+		 * move the residual byte to memory.
+		 */
+		nxtdsp = NCB_SCRIPTH_PHYS (np, wsr_ma_helper);
+	}
+
+	if (DEBUG_FLAGS & DEBUG_PHASE) {
+		PRINT_ADDR(cp->cmd);
+		printk ("PM %x %x %x / %x %x %x.\n",
+			hflags0, hflags, newcmd,
+			(unsigned)scr_to_cpu(pm->sg.addr),
+			(unsigned)scr_to_cpu(pm->sg.size),
+			(unsigned)scr_to_cpu(pm->ret));
+	}
+
+	/*
+	**	Restart the SCRIPTS processor.
+	*/
+
+	OUTL (nc_temp, newcmd);
+	OUTL_DSP (nxtdsp);
+	return;
+
+	/*
+	**	Unexpected phase changes that occurs when the current phase
+	**	is not a DATA IN or DATA OUT phase are due to error conditions.
+	**	Such event may only happen when the SCRIPTS is using a
+	**	multibyte SCSI MOVE.
+	**
+	**	Phase change		Some possible cause
+	**
+	**	COMMAND  --> MSG IN	SCSI parity error detected by target.
+	**	COMMAND  --> STATUS	Bad command or refused by target.
+	**	MSG OUT  --> MSG IN     Message rejected by target.
+	**	MSG OUT  --> COMMAND    Bogus target that discards extended
+	**				negotiation messages.
+	**
+	**	The code below does not care of the new phase and so
+	**	trusts the target. Why to annoy it ?
+	**	If the interrupted phase is COMMAND phase, we restart at
+	**	dispatcher.
+	**	If a target does not get all the messages after selection,
+	**	the code assumes blindly that the target discards extended
+	**	messages and clears the negotiation status.
+	**	If the target does not want all our response to negotiation,
+	**	we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
+	**	bloat for such a should_not_happen situation).
+	**	In all other situation, we reset the BUS.
+	**	Are these assumptions reasonable ? (Wait and see ...)
+	*/
+unexpected_phase:
+	dsp -= 8;
+	nxtdsp = 0;
+
+	switch (cmd & 7) {
+	case 2:	/* COMMAND phase */
+		nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
+		break;
+#if 0
+	case 3:	/* STATUS  phase */
+		nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
+		break;
+#endif
+	case 6:	/* MSG OUT phase */
+		/*
+		**	If the device may want to use untagged when we want
+		**	tagged, we prepare an IDENTIFY without disc. granted,
+		**	since we will not be able to handle reselect.
+		**	Otherwise, we just don't care.
+		*/
+		if (dsp == NCB_SCRIPT_PHYS (np, send_ident)) {
+			if (cp->tag != NO_TAG && olen - rest <= 3) {
+				cp->host_status = HS_BUSY;
+				np->msgout[0] = M_IDENTIFY | cp->lun;
+				nxtdsp = NCB_SCRIPTH_PHYS (np, ident_break_atn);
+			}
+			else
+				nxtdsp = NCB_SCRIPTH_PHYS (np, ident_break);
+		}
+		else if (dsp == NCB_SCRIPTH_PHYS (np, send_wdtr) ||
+			 dsp == NCB_SCRIPTH_PHYS (np, send_sdtr) ||
+			 dsp == NCB_SCRIPTH_PHYS (np, send_ppr)) {
+			nxtdsp = NCB_SCRIPTH_PHYS (np, nego_bad_phase);
+		}
+		break;
+#if 0
+	case 7:	/* MSG IN  phase */
+		nxtdsp = NCB_SCRIPT_PHYS (np, clrack);
+		break;
+#endif
+	}
+
+	if (nxtdsp) {
+		OUTL_DSP (nxtdsp);
+		return;
+	}
+
+reset_all:
+	ncr_start_reset(np);
+}
+
+/*==========================================================
+**
+** ncr chip handler for QUEUE FULL and CHECK CONDITION
+**
+**==========================================================
+**
+** On QUEUE FULL status, we set the actual tagged command
+** queue depth to the number of disconnected CCBs that is
+** hopefully a good value to avoid further QUEUE FULL.
+**
+** On CHECK CONDITION or COMMAND TERMINATED, we use the
+** CCB of the failed command for performing a REQUEST
+** SENSE SCSI command.
+**
+** We do not want to change the order commands will be
+** actually queued to the device after we received a
+** QUEUE FULL status. We also want to properly deal with
+** contingent allegiance condition. For these reasons,
+** we remove from the start queue all commands for this
+** LUN that haven't been yet queued to the device and
+** put them back in the corresponding LUN queue, then
+** requeue the CCB that failed in front of the LUN queue.
+** I just hope this not to be performed too often. :)
+**
+** If we are using IMMEDIATE ARBITRATION, we clear the
+** IARB hint for every commands we encounter in order not
+** to be stuck with a won arbitration and no job to queue
+** to a device.
+**----------------------------------------------------------
+*/
+
+/*
+**	Handle QUEUE FULL / CHECK CONDITION / COMMAND TERMINATED status
+**	reported by SCRIPTS interrupt 'num' for CCB 'cp' (see the block
+**	comment above for the overall strategy).
+**
+**	np:  board control block.
+**	num: SIR interrupt number that brought us here.
+**	cp:  CCB of the command that received the bad SCSI status.
+*/
+static void ncr_sir_to_redo(ncb_p np, int num, ccb_p cp)
+{
+	Scsi_Cmnd *cmd	= cp->cmd;
+	tcb_p tp	= &np->target[cp->target];
+	lcb_p lp	= ncr_lp(np, tp, cp->lun);
+	ccb_p		cp2;
+	int		busyccbs = 1;
+	u_int32		startp;
+	u_char		s_status = INB (SS_PRT);
+	int		msglen;
+	int		i, j;
+
+
+	/*
+	**	If the LCB is not yet available, then only
+	**	1 IO is accepted, so we should have it.
+	*/
+	if (!lp)
+		goto next;
+	/*
+	**	Remove all CCBs queued to the chip for that LUN and put
+	**	them back in the LUN CCB wait queue.
+	*/
+	busyccbs = lp->queuedccbs;
+	/* Index of the next start-queue slot the SCRIPTS will schedule. */
+	i = (INL (nc_scratcha) - np->p_squeue) / 4;
+	j = i;
+	while (i != np->squeueput) {
+		cp2 = ncr_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
+		assert(cp2);
+#ifdef SCSI_NCR_IARB_SUPPORT
+		/* IARB hints may not be relevant any more. Forget them. */
+		cp2->host_flags &= ~HF_HINT_IARB;
+#endif
+		if (cp2 && cp2->target == cp->target && cp2->lun == cp->lun) {
+			xpt_remque(&cp2->link_ccbq);
+			xpt_insque_head(&cp2->link_ccbq, &lp->wait_ccbq);
+			--lp->queuedccbs;
+			cp2->queued = 0;
+		}
+		else {
+			/* Keep this entry: compact it towards slot j. */
+			if (i != j)
+				np->squeue[j] = np->squeue[i];
+			if ((j += 2) >= MAX_START*2) j = 0;
+		}
+		if ((i += 2) >= MAX_START*2) i = 0;
+	}
+	if (i != j)		/* Copy back the idle task if needed */
+		np->squeue[j] = np->squeue[i];
+	np->squeueput = j;	/* Update our current start queue pointer */
+
+	/*
+	**	Requeue the interrupted CCB in front of the
+	**	LUN CCB wait queue to preserve ordering.
+	*/
+	xpt_remque(&cp->link_ccbq);
+	xpt_insque_head(&cp->link_ccbq, &lp->wait_ccbq);
+	--lp->queuedccbs;
+	cp->queued = 0;
+
+next:
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+	/* IARB hint may not be relevant any more. Forget it. */
+	cp->host_flags &= ~HF_HINT_IARB;
+	if (np->last_cp)
+		np->last_cp = 0;
+#endif
+
+	/*
+	**	Now we can restart the SCRIPTS processor safely.
+	*/
+	OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
+
+	switch(s_status) {
+	default:		/* Unexpected status: complete as-is. */
+	case S_BUSY:
+		ncr_complete(np, cp);
+		break;
+	case S_QUEUE_FULL:
+		if (!lp || !lp->queuedccbs) {
+			ncr_complete(np, cp);
+			break;
+		}
+		if (bootverbose >= 1) {
+			PRINT_ADDR(cmd);
+			printk ("QUEUE FULL! %d busy, %d disconnected CCBs\n",
+				busyccbs, lp->queuedccbs);
+		}
+		/*
+		**	Decrease number of tags to the number of
+		**	disconnected commands.
+		*/
+		if (lp->queuedccbs < lp->numtags) {
+			lp->numtags	= lp->queuedccbs;
+			lp->num_good	= 0;
+			ncr_setup_tags (np, cp->target, cp->lun);
+		}
+		/*
+		**	Repair the offending CCB.
+		*/
+		cp->phys.header.savep	= cp->startp;
+		cp->phys.header.lastp	= cp->lastp0;
+		cp->host_status		= HS_BUSY;
+		cp->scsi_status		= S_ILLEGAL;
+		cp->xerr_status		= 0;
+		cp->extra_bytes		= 0;
+		cp->host_flags		&= (HF_PM_TO_C|HF_DATA_IN);
+
+		break;
+
+	case S_TERMINATED:
+	case S_CHECK_COND:
+		/*
+		**	If we were requesting sense, give up.
+		*/
+		if (cp->host_flags & HF_AUTO_SENSE) {
+			ncr_complete(np, cp);
+			break;
+		}
+
+		/*
+		**	Save SCSI status and extended error.
+		**	Compute the data residual now.
+		*/
+		cp->sv_scsi_status = cp->scsi_status;
+		cp->sv_xerr_status = cp->xerr_status;
+		cp->resid = ncr_compute_residual(np, cp);
+
+		/*
+		**	Device returned CHECK CONDITION status.
+		**	Prepare all needed data structures for getting
+		**	sense data.
+		*/
+
+		/*
+		**	identify message
+		*/
+		cp->scsi_smsg2[0]	= M_IDENTIFY | cp->lun;
+		msglen = 1;
+
+		/*
+		**	If we are currently using anything different from
+		**	async. 8 bit data transfers with that target,
+		**	start a negotiation, since the device may want
+		**	to report us a UNIT ATTENTION condition due to
+		**	a cause we currently ignore, and we do not want
+		**	to be stuck with WIDE and/or SYNC data transfer.
+		**
+		**	cp->nego_status is filled by ncr_prepare_nego().
+		**
+		**	Do NOT negotiate if performing integrity check
+		**	or if integrity check has completed, all check
+		**	conditions will have been cleared.
+		*/
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+		if (DEBUG_FLAGS & DEBUG_IC) {
+		printk("%s: ncr_sir_to_redo: ic_done %2X, in_progress %2X\n",
+			ncr_name(np), tp->ic_done, cp->cmd->ic_in_progress);
+		}
+
+		/*
+		**	If parity error during integrity check,
+		**	set the target width to narrow. Otherwise,
+		**	do not negotiate on a request sense.
+		*/
+		if ( np->check_integ_par && np->check_integrity
+						&& cp->cmd->ic_in_progress ) {
+			cp->nego_status = 0;
+			msglen +=
+			    ncr_ic_nego (np, cp, cmd ,&cp->scsi_smsg2[msglen]);
+		}
+
+		if (!np->check_integrity ||
+		   	(np->check_integrity &&
+				(!cp->cmd->ic_in_progress && !tp->ic_done)) ) {
+			ncr_negotiate(np, tp);
+			cp->nego_status = 0;
+			{
+				u_char sync_offset;
+				if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+					(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+					sync_offset = tp->sval & 0x3f;
+				else
+					sync_offset = tp->sval & 0x1f;
+
+				if ((tp->wval & EWS) || sync_offset)
+			  		msglen +=
+					    ncr_prepare_nego (np, cp, &cp->scsi_smsg2[msglen]);
+			}
+
+		}
+#else
+		ncr_negotiate(np, tp);
+		cp->nego_status = 0;
+		if ((tp->wval & EWS) || (tp->sval & 0x1f))
+			msglen +=
+			    ncr_prepare_nego (np, cp, &cp->scsi_smsg2[msglen]);
+#endif	/* SCSI_NCR_INTEGRITY_CHECKING */
+
+		/*
+		**	Message table indirect structure.
+		*/
+		cp->phys.smsg.addr	= cpu_to_scr(CCB_PHYS (cp, scsi_smsg2));
+		cp->phys.smsg.size	= cpu_to_scr(msglen);
+
+		/*
+		**	sense command
+		*/
+		cp->phys.cmd.addr	= cpu_to_scr(CCB_PHYS (cp, sensecmd));
+		cp->phys.cmd.size	= cpu_to_scr(6);
+
+		/*
+		**	patch requested size into sense command
+		**	(0x03 = REQUEST SENSE opcode)
+		*/
+		cp->sensecmd[0]		= 0x03;
+		cp->sensecmd[1]		= cp->lun << 5;
+		cp->sensecmd[4]		= sizeof(cp->sense_buf);
+
+		/*
+		**	sense data
+		*/
+		bzero(cp->sense_buf, sizeof(cp->sense_buf));
+		cp->phys.sense.addr	= cpu_to_scr(CCB_PHYS(cp,sense_buf[0]));
+		cp->phys.sense.size	= cpu_to_scr(sizeof(cp->sense_buf));
+
+		/*
+		**	requeue the command.
+		*/
+		startp = NCB_SCRIPTH_PHYS (np, sdata_in);
+
+		cp->phys.header.savep	= cpu_to_scr(startp);
+		cp->phys.header.goalp	= cpu_to_scr(startp + 16);
+		cp->phys.header.lastp	= cpu_to_scr(startp);
+		cp->phys.header.wgoalp	= cpu_to_scr(startp + 16);
+		cp->phys.header.wlastp	= cpu_to_scr(startp);
+
+		cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
+		cp->scsi_status = S_ILLEGAL;
+		cp->host_flags	= (HF_AUTO_SENSE|HF_DATA_IN);
+
+		cp->phys.header.go.start =
+			cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
+
+		/*
+		**	If lp not yet allocated, requeue the command.
+		*/
+		if (!lp)
+			ncr_put_start_queue(np, cp);
+		break;
+	}
+
+	/*
+	**	requeue awaiting scsi commands for this lun.
+	*/
+	if (lp)
+		ncr_start_next_ccb(np, lp, 1);
+
+	return;
+}
+
+/*----------------------------------------------------------
+**
+** After a device has accepted some management message
+** as BUS DEVICE RESET, ABORT TASK, etc ..., or when
+** a device signals a UNIT ATTENTION condition, some
+** tasks are thrown away by the device. We are required
+** to reflect that on our tasks list since the device
+** will never complete these tasks.
+**
+** This function completes all disconnected CCBs for a
+** given target that matches the following criteria:
+** - lun=-1 means any logical UNIT otherwise a given one.
+** - task=-1 means any task, otherwise a given one.
+**----------------------------------------------------------
+*/
+/*
+**	Complete every disconnected CCB of 'target' that matches 'lun'
+**	(-1 = any logical unit) and 'task' (-1 = any task), reporting
+**	host status 'hsts' and S_ILLEGAL.  Returns the number of CCBs
+**	completed.
+*/
+static int ncr_clear_tasks(ncb_p np, u_char hsts,
+			   int target, int lun, int task)
+{
+	int i = 0;
+	ccb_p cp;
+
+	for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+		if (cp->host_status != HS_DISCONNECT)
+			continue;
+		if (cp->target != target)
+			continue;
+		if (lun != -1 && cp->lun != lun)
+			continue;
+		/* For tagged commands, scsi_smsg[2] carries the tag value. */
+		if (task != -1 && cp->tag != NO_TAG && cp->scsi_smsg[2] != task)
+			continue;
+		cp->host_status = hsts;
+		cp->scsi_status = S_ILLEGAL;
+		ncr_complete(np, cp);
+		++i;
+	}
+	return i;
+}
+
+/*==========================================================
+**
+** ncr chip handler for TASKS recovery.
+**
+**==========================================================
+**
+** We cannot safely abort a command, while the SCRIPTS
+** processor is running, since we just would be in race
+** with it.
+**
+** As long as we have tasks to abort, we keep the SEM
+** bit set in the ISTAT. When this bit is set, the
+** SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED)
+** each time it enters the scheduler.
+**
+** If we have to reset a target, clear tasks of a unit,
+** or to perform the abort of a disconnected job, we
+** restart the SCRIPTS for selecting the target. Once
+** selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED).
+** If it loses arbitration, the SCRIPTS will interrupt again
+** the next time it will enter its scheduler, and so on ...
+**
+** On SIR_TARGET_SELECTED, we scan for the more
+** appropriate thing to do:
+**
+** - If nothing, we just sent a M_ABORT message to the
+** target to get rid of the useless SCSI bus ownership.
+** According to the specs, no tasks shall be affected.
+** - If the target is to be reset, we send it a M_RESET
+** message.
+** - If a logical UNIT is to be cleared , we send the
+** IDENTIFY(lun) + M_ABORT.
+** - If an untagged task is to be aborted, we send the
+** IDENTIFY(lun) + M_ABORT.
+** - If a tagged task is to be aborted, we send the
+** IDENTIFY(lun) + task attributes + M_ABORT_TAG.
+**
+** Once our 'kiss of death' :) message has been accepted
+** by the target, the SCRIPTS interrupts again
+** (SIR_ABORT_SENT). On this interrupt, we complete
+** all the CCBs that should have been aborted by the
+** target according to our message.
+**
+**----------------------------------------------------------
+*/
+static void ncr_sir_task_recovery(ncb_p np, int num)
+{
+ ccb_p cp;
+ tcb_p tp;
+ int target=-1, lun=-1, task;
+ int i, k;
+ u_char *p;
+
+ switch(num) {
+ /*
+ ** The SCRIPTS processor stopped before starting
+ ** the next command in order to allow us to perform
+ ** some task recovery.
+ */
+ case SIR_SCRIPT_STOPPED:
+
+ /*
+ ** Do we have any target to reset or unit to clear ?
+ */
+ for (i = 0 ; i < MAX_TARGET ; i++) {
+ tp = &np->target[i];
+ if (tp->to_reset || (tp->l0p && tp->l0p->to_clear)) {
+ target = i;
+ break;
+ }
+ if (!tp->lmp)
+ continue;
+ for (k = 1 ; k < MAX_LUN ; k++) {
+ if (tp->lmp[k] && tp->lmp[k]->to_clear) {
+ target = i;
+ break;
+ }
+ }
+ if (target != -1)
+ break;
+ }
+
+ /*
+ ** If not, look at the CCB list for any
+ ** disconnected CCB to be aborted.
+ */
+ if (target == -1) {
+ for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+ if (cp->host_status != HS_DISCONNECT)
+ continue;
+ if (cp->to_abort) {
+ target = cp->target;
+ break;
+ }
+ }
+ }
+
+ /*
+ ** If some target is to be selected,
+ ** prepare and start the selection.
+ */
+ if (target != -1) {
+ tp = &np->target[target];
+ np->abrt_sel.sel_id = target;
+ np->abrt_sel.sel_scntl3 = tp->wval;
+ np->abrt_sel.sel_sxfer = tp->sval;
+ np->abrt_sel.sel_scntl4 = tp->uval;
+ OUTL(nc_dsa, np->p_ncb);
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, sel_for_abort));
+ return;
+ }
+
+ /*
+ ** Nothing is to be selected, so we donnot need
+ ** to synchronize with the SCRIPTS anymore.
+ ** Remove the SEM flag from the ISTAT.
+ */
+ np->istat_sem = 0;
+ OUTB (nc_istat, SIGP);
+
+ /*
+ ** Now look at CCBs to abort that haven't started yet.
+ ** Remove all those CCBs from the start queue and
+ ** complete them with appropriate status.
+ ** Btw, the SCRIPTS processor is still stopped, so
+ ** we are not in race.
+ */
+ for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+ if (cp->host_status != HS_BUSY &&
+ cp->host_status != HS_NEGOTIATE)
+ continue;
+ if (!cp->to_abort)
+ continue;
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** If we are using IMMEDIATE ARBITRATION, we donnot
+ ** want to cancel the last queued CCB, since the
+ ** SCRIPTS may have anticipated the selection.
+ */
+ if (cp == np->last_cp) {
+ cp->to_abort = 0;
+ continue;
+ }
+#endif
+ /*
+ ** Compute index of next position in the start
+ ** queue the SCRIPTS will schedule.
+ */
+ i = (INL (nc_scratcha) - np->p_squeue) / 4;
+
+ /*
+ ** Remove the job from the start queue.
+ */
+ k = -1;
+ while (1) {
+ if (i == np->squeueput)
+ break;
+ if (k == -1) { /* Not found yet */
+ if (cp == ncr_ccb_from_dsa(np,
+ scr_to_cpu(np->squeue[i])))
+ k = i; /* Found */
+ }
+ else {
+ /*
+ ** Once found, we have to move
+ ** back all jobs by 1 position.
+ */
+ np->squeue[k] = np->squeue[i];
+ k += 2;
+ if (k >= MAX_START*2)
+ k = 0;
+ }
+
+ i += 2;
+ if (i >= MAX_START*2)
+ i = 0;
+ }
+ if (k != -1) {
+ np->squeue[k] = np->squeue[i]; /* Idle task */
+ np->squeueput = k; /* Start queue pointer */
+ }
+ cp->host_status = HS_ABORTED;
+ cp->scsi_status = S_ILLEGAL;
+ ncr_complete(np, cp);
+ }
+ break;
+ /*
+ ** The SCRIPTS processor has selected a target
+ ** we may have some manual recovery to perform for.
+ */
+ case SIR_TARGET_SELECTED:
+ target = (INB (nc_sdid) & 0xf);
+ tp = &np->target[target];
+
+ np->abrt_tbl.addr = vtobus(np->abrt_msg);
+
+ /*
+ ** If the target is to be reset, prepare a
+ ** M_RESET message and clear the to_reset flag
+ ** since we donnot expect this operation to fail.
+ */
+ if (tp->to_reset) {
+ np->abrt_msg[0] = M_RESET;
+ np->abrt_tbl.size = 1;
+ tp->to_reset = 0;
+ break;
+ }
+
+ /*
+ ** Otherwise, look for some logical unit to be cleared.
+ */
+ if (tp->l0p && tp->l0p->to_clear)
+ lun = 0;
+ else if (tp->lmp) {
+ for (k = 1 ; k < MAX_LUN ; k++) {
+ if (tp->lmp[k] && tp->lmp[k]->to_clear) {
+ lun = k;
+ break;
+ }
+ }
+ }
+
+ /*
+ ** If a logical unit is to be cleared, prepare
+ ** an IDENTIFY(lun) + ABORT MESSAGE.
+ */
+ if (lun != -1) {
+ lcb_p lp = ncr_lp(np, tp, lun);
+ lp->to_clear = 0; /* We donnot expect to fail here */
+ np->abrt_msg[0] = M_IDENTIFY | lun;
+ np->abrt_msg[1] = M_ABORT;
+ np->abrt_tbl.size = 2;
+ break;
+ }
+
+ /*
+ ** Otherwise, look for some disconnected job to
+ ** abort for this target.
+ */
+ for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+ if (cp->host_status != HS_DISCONNECT)
+ continue;
+ if (cp->target != target)
+ continue;
+ if (cp->to_abort)
+ break;
+ }
+
+ /*
+ ** If we have none, probably since the device has
+** completed the command before we won arbitration,
+ ** send a M_ABORT message without IDENTIFY.
+ ** According to the specs, the device must just
+ ** disconnect the BUS and not abort any task.
+ */
+ if (!cp) {
+ np->abrt_msg[0] = M_ABORT;
+ np->abrt_tbl.size = 1;
+ break;
+ }
+
+ /*
+ ** We have some task to abort.
+ ** Set the IDENTIFY(lun)
+ */
+ np->abrt_msg[0] = M_IDENTIFY | cp->lun;
+
+ /*
+ ** If we want to abort an untagged command, we
+ ** will send a IDENTIFY + M_ABORT.
+ ** Otherwise (tagged command), we will send
+** an IDENTIFY + task attributes + ABORT TAG.
+ */
+ if (cp->tag == NO_TAG) {
+ np->abrt_msg[1] = M_ABORT;
+ np->abrt_tbl.size = 2;
+ }
+ else {
+ np->abrt_msg[1] = cp->scsi_smsg[1];
+ np->abrt_msg[2] = cp->scsi_smsg[2];
+ np->abrt_msg[3] = M_ABORT_TAG;
+ np->abrt_tbl.size = 4;
+ }
+ cp->to_abort = 0; /* We donnot expect to fail here */
+ break;
+
+ /*
+ ** The target has accepted our message and switched
+ ** to BUS FREE phase as we expected.
+ */
+ case SIR_ABORT_SENT:
+ target = (INB (nc_sdid) & 0xf);
+ tp = &np->target[target];
+
+ /*
+ ** If we didn't abort anything, leave here.
+ */
+ if (np->abrt_msg[0] == M_ABORT)
+ break;
+
+ /*
+ ** If we sent a M_RESET, then a hardware reset has
+ ** been performed by the target.
+ ** - Reset everything to async 8 bit
+ ** - Tell ourself to negotiate next time :-)
+ ** - Prepare to clear all disconnected CCBs for
+ ** this target from our task list (lun=task=-1)
+ */
+ lun = -1;
+ task = -1;
+ if (np->abrt_msg[0] == M_RESET) {
+ tp->sval = 0;
+ tp->wval = np->rv_scntl3;
+ tp->uval = np->rv_scntl4;
+ ncr_set_sync_wide_status(np, target);
+ ncr_negotiate(np, tp);
+ }
+
+ /*
+ ** Otherwise, check for the LUN and TASK(s)
+ ** concerned by the cancelation.
+ ** If it is not ABORT_TAG then it is CLEAR_QUEUE
+ ** or an ABORT message :-)
+ */
+ else {
+ lun = np->abrt_msg[0] & 0x3f;
+ if (np->abrt_msg[1] == M_ABORT_TAG)
+ task = np->abrt_msg[2];
+ }
+
+ /*
+ ** Complete all the CCBs the device should have
+ ** aborted due to our 'kiss of death' message.
+ */
+ (void) ncr_clear_tasks(np, HS_ABORTED, target, lun, task);
+ break;
+
+ /*
+ ** We have performed a auto-sense that succeeded.
+ ** If the device reports a UNIT ATTENTION condition
+ ** due to a RESET condition, we must complete all
+ ** disconnect CCBs for this unit since the device
+ ** shall have thrown them away.
+ ** Since I haven't time to guess what the specs are
+ ** expecting for other UNIT ATTENTION conditions, I
+ ** decided to only care about RESET conditions. :)
+ */
+ case SIR_AUTO_SENSE_DONE:
+ cp = ncr_ccb_from_dsa(np, INL (nc_dsa));
+ if (!cp)
+ break;
+ memcpy(cp->cmd->sense_buffer, cp->sense_buf,
+ sizeof(cp->cmd->sense_buffer));
+ p = &cp->cmd->sense_buffer[0];
+
+ if (p[0] != 0x70 || p[2] != 0x6 || p[12] != 0x29)
+ break;
+#if 0
+ (void) ncr_clear_tasks(np, HS_RESET, cp->target, cp->lun, -1);
+#endif
+ break;
+ }
+
+ /*
+ ** Print to the log the message we intend to send.
+ */
+ if (num == SIR_TARGET_SELECTED) {
+ PRINT_TARGET(np, target);
+ ncr_printl_hex("control msgout:", np->abrt_msg,
+ np->abrt_tbl.size);
+ np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
+ }
+
+ /*
+ ** Let the SCRIPTS processor continue.
+ */
+ OUTONB_STD ();
+}
+
+
+/*==========================================================
+**
+** Gérard's alchemy:) that deals with with the data
+** pointer for both MDP and the residual calculation.
+**
+**==========================================================
+**
+** I didn't want to bloat the code by more than 200
+** lines for the handling of both MDP and the residual.
+** This has been achieved by using a data pointer
+** representation consisting in an index in the data
+** array (dp_sg) and a negative offset (dp_ofs) that
+** have the following meaning:
+**
+** - dp_sg = MAX_SCATTER
+** we are at the end of the data script.
+** - dp_sg < MAX_SCATTER
+** dp_sg points to the next entry of the scatter array
+** we want to transfer.
+** - dp_ofs < 0
+** dp_ofs represents the residual of bytes of the
+** previous entry scatter entry we will send first.
+** - dp_ofs = 0
+** no residual to send first.
+**
+** The function ncr_evaluate_dp() accepts an arbitrary
+** offset (basically from the MDP message) and returns
+** the corresponding values of dp_sg and dp_ofs.
+**
+**----------------------------------------------------------
+*/
+
+/*
+** Translate the SCRIPTS address 'scr' plus the signed byte offset
+** *ofs into the (dp_sg, dp_ofs) representation described in the
+** banner above.  On success the residual offset is stored back
+** through *ofs and the scatter index dp_sg is returned; -1 is
+** returned when the resulting pointer falls outside the CCB's
+** data area.  Also tracks the extreme (furthest) data pointer
+** seen so far in cp->ext_sg / cp->ext_ofs.
+*/
+static int ncr_evaluate_dp(ncb_p np, ccb_p cp, u_int32 scr, int *ofs)
+{
+ u_int32 dp_scr;
+ int dp_ofs, dp_sg, dp_sgmin;
+ int tmp;
+ struct pm_ctx *pm;
+
+ /*
+ ** Compute the resulted data pointer in term of a script
+ ** address within some DATA script and a signed byte offset.
+ */
+ dp_scr = scr;
+ dp_ofs = *ofs;
+ if (dp_scr == NCB_SCRIPT_PHYS (np, pm0_data))
+ pm = &cp->phys.pm0;
+ else if (dp_scr == NCB_SCRIPT_PHYS (np, pm1_data))
+ pm = &cp->phys.pm1;
+ else
+ pm = 0;
+
+ if (pm) {
+ dp_scr = scr_to_cpu(pm->ret);
+ dp_ofs -= scr_to_cpu(pm->sg.size);
+ }
+
+ /*
+ ** Deduce the index of the sg entry.
+ ** Keep track of the index of the first valid entry.
+ ** If result is dp_sg = MAX_SCATTER, then we are at the
+ ** end of the data and vice-versa.
+ */
+ tmp = scr_to_cpu(cp->phys.header.goalp);
+ dp_sg = MAX_SCATTER;
+ if (dp_scr != tmp)
+ dp_sg -= (tmp - 8 - (int)dp_scr) / (SCR_SG_SIZE*4);
+ dp_sgmin = MAX_SCATTER - cp->segments;
+
+ /*
+ ** Move to the sg entry the data pointer belongs to.
+ **
+ ** If we are inside the data area, we expect result to be:
+ **
+ ** Either,
+ ** dp_ofs = 0 and dp_sg is the index of the sg entry
+ ** the data pointer belongs to (or the end of the data)
+ ** Or,
+ ** dp_ofs < 0 and dp_sg is the index of the sg entry
+ ** the data pointer belongs to + 1.
+ */
+ if (dp_ofs < 0) {
+ int n;
+ while (dp_sg > dp_sgmin) {
+ --dp_sg;
+ tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+ n = dp_ofs + (tmp & 0xffffff);
+ if (n > 0) {
+ ++dp_sg;
+ break;
+ }
+ dp_ofs = n;
+ }
+ }
+ else if (dp_ofs > 0) {
+ while (dp_sg < MAX_SCATTER) {
+ tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+ dp_ofs -= (tmp & 0xffffff);
+ ++dp_sg;
+ if (dp_ofs <= 0)
+ break;
+ }
+ }
+
+ /*
+ ** Make sure the data pointer is inside the data area.
+ ** If not, return some error.
+ */
+ if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
+ goto out_err;
+ else if (dp_sg > MAX_SCATTER || (dp_sg == MAX_SCATTER && dp_ofs > 0))
+ goto out_err;
+
+ /*
+ ** Save the extreme pointer if needed.
+ */
+ if (dp_sg > cp->ext_sg ||
+ (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
+ cp->ext_sg = dp_sg;
+ cp->ext_ofs = dp_ofs;
+ }
+
+ /*
+ ** Return data.
+ */
+ *ofs = dp_ofs;
+ return dp_sg;
+
+out_err:
+ return -1;
+}
+
+/*==========================================================
+**
+** ncr chip handler for MODIFY DATA POINTER MESSAGE
+**
+**==========================================================
+**
+** We also call this function on IGNORE WIDE RESIDUE
+** messages that do not match a SWIDE full condition.
+** Btw, we assume in that situation that such a message
+** is equivalent to a MODIFY DATA POINTER (offset=-1).
+**
+**----------------------------------------------------------
+*/
+
+/*
+** Apply a MODIFY DATA POINTER of 'ofs' bytes (ofs == -1 for an
+** unanticipated IGNORE WIDE RESIDUE, see banner above) to the
+** current data pointer of CCB 'cp', then resume SCRIPTS at
+** 'clrack' — or at 'msg_bad' to reject the message when the
+** resulting pointer is invalid or the CCB is an auto-sense one.
+*/
+static void ncr_modify_dp(ncb_p np, tcb_p tp, ccb_p cp, int ofs)
+{
+ int dp_ofs = ofs;
+ u_int32 dp_scr = INL (nc_temp);
+ u_int32 dp_ret;
+ u_int32 tmp;
+ u_char hflags;
+ int dp_sg;
+ struct pm_ctx *pm;
+
+ /*
+ ** Not supported for auto_sense;
+ */
+ if (cp->host_flags & HF_AUTO_SENSE)
+ goto out_reject;
+
+ /*
+ ** Apply our alchemy:) (see comments in ncr_evaluate_dp()),
+ ** to the resulted data pointer.
+ */
+ dp_sg = ncr_evaluate_dp(np, cp, dp_scr, &dp_ofs);
+ if (dp_sg < 0)
+ goto out_reject;
+
+ /*
+ ** And our alchemy:) allows to easily calculate the data
+ ** script address we want to return for the next data phase.
+ */
+ dp_ret = cpu_to_scr(cp->phys.header.goalp);
+ dp_ret = dp_ret - 8 - (MAX_SCATTER - dp_sg) * (SCR_SG_SIZE*4);
+
+ /*
+ ** If offset / scatter entry is zero we do not need
+ ** a context for the new current data pointer.
+ */
+ if (dp_ofs == 0) {
+ dp_scr = dp_ret;
+ goto out_ok;
+ }
+
+ /*
+ ** Get a context for the new current data pointer.
+ */
+ hflags = INB (HF_PRT);
+
+ if (hflags & HF_DP_SAVED)
+ hflags ^= HF_ACT_PM;
+
+ if (!(hflags & HF_ACT_PM)) {
+ pm = &cp->phys.pm0;
+ dp_scr = NCB_SCRIPT_PHYS (np, pm0_data);
+ }
+ else {
+ pm = &cp->phys.pm1;
+ dp_scr = NCB_SCRIPT_PHYS (np, pm1_data);
+ }
+
+ hflags &= ~(HF_DP_SAVED);
+
+ OUTB (HF_PRT, hflags);
+
+ /*
+ ** Set up the new current data pointer.
+ ** ofs < 0 there, and for the next data phase, we
+ ** want to transfer part of the data of the sg entry
+ ** corresponding to index dp_sg-1 prior to returning
+ ** to the main data script.
+ */
+ pm->ret = cpu_to_scr(dp_ret);
+ tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
+ tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
+ pm->sg.addr = cpu_to_scr(tmp);
+ pm->sg.size = cpu_to_scr(-dp_ofs);
+
+out_ok:
+ OUTL (nc_temp, dp_scr);
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+ return;
+
+out_reject:
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+}
+
+
+/*==========================================================
+**
+** ncr chip calculation of the data residual.
+**
+**==========================================================
+**
+** As I used to say, the requirement of data residual
+** in SCSI is broken, useless and cannot be achieved
+** without huge complexity.
+** But most OSes and even the official CAM require it.
+** When stupidity happens to be so widely spread inside
+** a community, it gets hard to convince.
+**
+** Anyway, I don't care, since I am not going to use
+** any software that considers this data residual as
+** a relevant information. :)
+**
+**----------------------------------------------------------
+*/
+
+/*
+** Compute the data residual (in bytes) for CCB 'cp' from the
+** last saved data pointer, using the (sg index, offset) alchemy
+** of ncr_evaluate_dp() — see banner above.
+*/
+static int ncr_compute_residual(ncb_p np, ccb_p cp)
+{
+ int dp_sg, dp_sgmin, tmp;
+ int resid=0;
+ int dp_ofs = 0;
+
+ /*
+ * Check for some data lost or just thrown away.
+ * We are not required to be quite accurate in this
+ * situation. Btw, if we are odd for output and the
+ * device claims some more data, it may well happen
+ * that our residual be zero. :-)
+ */
+ if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
+ if (cp->xerr_status & XE_EXTRA_DATA)
+ resid -= cp->extra_bytes;
+ if (cp->xerr_status & XE_SODL_UNRUN)
+ ++resid;
+ if (cp->xerr_status & XE_SWIDE_OVRUN)
+ --resid;
+ }
+
+
+ /*
+ ** If SCRIPTS reaches its goal point, then
+ ** there is no additional residual.
+ */
+ if (cp->phys.header.lastp == cp->phys.header.goalp)
+ return resid;
+
+ /*
+ ** If the last data pointer is data_io (direction
+ ** unknown), then no data transfer should have
+ ** taken place.
+ */
+ if (cp->phys.header.lastp == NCB_SCRIPTH_PHYS (np, data_io))
+ return cp->data_len;
+
+ /*
+ ** If no data transfer occurs, or if the data
+ ** pointer is weird, return full residual.
+ */
+ if (cp->startp == cp->phys.header.lastp ||
+ ncr_evaluate_dp(np, cp, scr_to_cpu(cp->phys.header.lastp),
+ &dp_ofs) < 0) {
+ return cp->data_len;
+ }
+
+ /*
+ ** We are now full comfortable in the computation
+ ** of the data residual (2's complement).
+ */
+ dp_sgmin = MAX_SCATTER - cp->segments;
+ resid = -cp->ext_ofs;
+ for (dp_sg = cp->ext_sg; dp_sg < MAX_SCATTER; ++dp_sg) {
+ tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+ resid += (tmp & 0xffffff);
+ }
+
+ /*
+ ** Hopefully, the result is not too wrong.
+ */
+ return resid;
+}
+
+/*==========================================================
+**
+** Print out the content of a SCSI message.
+**
+**==========================================================
+*/
+
+/*
+** Print a SCSI message in hex to the console.
+** For an extended message, up to 7 additional bytes are printed
+** (bounded by the message length in msg[1]); for a two-byte
+** message (codes 0x20-0x2f) the second byte is printed too.
+** Returns the number of message bytes consumed.
+*/
+static int ncr_show_msg (u_char * msg)
+{
+ u_char i;
+ printk ("%x",*msg);
+ if (*msg==M_EXTENDED) {
+ for (i=1;i<8;i++) {
+ if (i-1>msg[1]) break;
+ printk ("-%x",msg[i]);
+ };
+ return (i+1);
+ } else if ((*msg & 0xf0) == 0x20) {
+ printk ("-%x",msg[1]);
+ return (2);
+ };
+ return (1);
+}
+
+/*
+** Print a labelled SCSI message, prefixed with the command
+** address when a CCB is supplied.
+*/
+static void ncr_print_msg (ccb_p cp, char *label, u_char *msg)
+{
+ if (cp)
+ PRINT_ADDR(cp->cmd);
+ if (label)
+ printk ("%s: ", label);
+
+ (void) ncr_show_msg (msg);
+ printk (".\n");
+}
+
+/*===================================================================
+**
+** Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
+**
+**===================================================================
+**
+** Was Sie schon immer ueber transfermode negotiation wissen wollten ...
+**
+** We try to negotiate sync and wide transfer only after
+** a successful INQUIRY command. We look at byte 7 of the
+** INQUIRY data to determine the capabilities of the target.
+**
+** When we try to negotiate, we append the negotiation message
+** to the identify and (maybe) simple tag message.
+** The host status field is set to HS_NEGOTIATE to mark this
+** situation.
+**
+** If the target doesn't answer this message immediately
+** (as required by the standard), the SIR_NEGO_FAILED interrupt
+** will be raised eventually.
+** The handler removes the HS_NEGOTIATE status, and sets the
+** negotiated value to the default (async / nowide).
+**
+** If we receive a matching answer immediately, we check it
+** for validity, and set the values.
+**
+** If we receive a Reject message immediately, we assume the
+** negotiation has failed, and fall back to standard values.
+**
+** If we receive a negotiation message while not in HS_NEGOTIATE
+** state, it's a target initiated negotiation. We prepare a
+** (hopefully) valid answer, set our parameters, and send back
+** this answer to the target.
+**
+** If the target doesn't fetch the answer (no message out phase),
+** we assume the negotiation has failed, and fall back to default
+** settings (SIR_NEGO_PROTO interrupt).
+**
+** When we set the values, we adjust them in all ccbs belonging
+** to this target, in the controller's register, and in the "phys"
+** field of the controller's struct ncb.
+**
+**---------------------------------------------------------------------
+*/
+
+/*==========================================================
+**
+** ncr chip handler for SYNCHRONOUS DATA TRANSFER
+** REQUEST (SDTR) message.
+**
+**==========================================================
+**
+** Read comments above.
+**
+**----------------------------------------------------------
+*/
+/*
+** Handle an incoming SDTR message: clamp the requested period and
+** offset to driver/target/controller limits, program the chip
+** timings, then either acknowledge (we were the initiator of the
+** negotiation) or send back an SDTR response (target-initiated).
+*/
+static void ncr_sync_nego(ncb_p np, tcb_p tp, ccb_p cp)
+{
+ u_char scntl3, scntl4;
+ u_char chg, ofs, per, fak;
+
+ /*
+ ** Synchronous request message received.
+ */
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "sync msg in", np->msgin);
+ };
+
+ /*
+ ** get requested values.
+ */
+
+ chg = 0;
+ per = np->msgin[3];
+ ofs = np->msgin[4];
+ if (ofs==0) per=255;
+
+ /*
+ ** if target sends SDTR message,
+ ** it CAN transfer synch.
+ */
+
+ if (ofs)
+ tp->inq_byte7 |= INQ7_SYNC;
+
+ /*
+ ** check values against driver limits.
+ */
+
+ if (per < np->minsync)
+ {chg = 1; per = np->minsync;}
+ if (per < tp->minsync)
+ {chg = 1; per = tp->minsync;}
+ if (ofs > tp->maxoffs)
+ {chg = 1; ofs = tp->maxoffs;}
+
+ /*
+ ** Check against controller limits.
+ */
+ fak = 7;
+ scntl3 = 0;
+ scntl4 = 0;
+ if (ofs != 0) {
+ ncr_getsync(np, per, &fak, &scntl3);
+ if (fak > 7) {
+ chg = 1;
+ ofs = 0;
+ }
+ }
+ if (ofs == 0) {
+ fak = 7;
+ per = 0;
+ scntl3 = 0;
+ scntl4 = 0;
+ tp->minsync = 0;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printk ("sync: per=%d scntl3=0x%x scntl4=0x%x ofs=%d fak=%d chg=%d.\n",
+ per, scntl3, scntl4, ofs, fak, chg);
+ }
+
+ if (INB (HS_PRT) == HS_NEGOTIATE) {
+ OUTB (HS_PRT, HS_BUSY);
+ switch (cp->nego_status) {
+ case NS_SYNC:
+ /*
+ ** This was an answer message
+ */
+ if (chg) {
+ /*
+ ** Answer wasn't acceptable.
+ */
+ ncr_setsync (np, cp, 0, 0xe0, 0);
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+ } else {
+ /*
+ ** Answer is ok.
+ */
+ if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs,0);
+ else
+ ncr_setsync (np, cp, scntl3, ofs, scntl4);
+
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+ };
+ return;
+
+ case NS_WIDE:
+ ncr_setwide (np, cp, 0, 0);
+ break;
+ };
+ };
+
+ /*
+ ** It was a request. Set value and
+ ** prepare an answer message
+ */
+
+ if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs,0);
+ else
+ ncr_setsync (np, cp, scntl3, ofs, scntl4);
+
+ np->msgout[0] = M_EXTENDED;
+ np->msgout[1] = 3;
+ np->msgout[2] = M_X_SYNC_REQ;
+ np->msgout[3] = per;
+ np->msgout[4] = ofs;
+
+ cp->nego_status = NS_SYNC;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "sync msgout", np->msgout);
+ }
+
+ np->msgin [0] = M_NOOP;
+
+ if (!ofs)
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+ else
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, sdtr_resp));
+}
+
+/*==========================================================
+**
+** ncr chip handler for WIDE DATA TRANSFER REQUEST
+** (WDTR) message.
+**
+**==========================================================
+**
+** Read comments above.
+**
+**----------------------------------------------------------
+*/
+/*
+** Handle an incoming WDTR message: clamp the requested width to
+** the user limit, program the chip, then either acknowledge (we
+** initiated the negotiation) or send back a WDTR response
+** (target-initiated).
+*/
+static void ncr_wide_nego(ncb_p np, tcb_p tp, ccb_p cp)
+{
+ u_char chg, wide;
+
+ /*
+ ** Wide request message received.
+ */
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "wide msgin", np->msgin);
+ };
+
+ /*
+ ** get requested values.
+ */
+
+ chg = 0;
+ wide = np->msgin[3];
+
+ /*
+ ** if target sends WDTR message,
+ ** it CAN transfer wide.
+ */
+
+ if (wide)
+ tp->inq_byte7 |= INQ7_WIDE16;
+
+ /*
+ ** check values against driver limits.
+ */
+
+ if (wide > tp->usrwide)
+ {chg = 1; wide = tp->usrwide;}
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printk ("wide: wide=%d chg=%d.\n", wide, chg);
+ }
+
+ if (INB (HS_PRT) == HS_NEGOTIATE) {
+ OUTB (HS_PRT, HS_BUSY);
+ switch (cp->nego_status) {
+ case NS_WIDE:
+ /*
+ ** This was an answer message
+ */
+ if (chg) {
+ /*
+ ** Answer wasn't acceptable.
+ */
+ ncr_setwide (np, cp, 0, 1);
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+ } else {
+ /*
+ ** Answer is ok.
+ */
+ ncr_setwide (np, cp, wide, 1);
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+ };
+ return;
+
+ case NS_SYNC:
+ ncr_setsync (np, cp, 0, 0xe0, 0);
+ break;
+ };
+ };
+
+ /*
+ ** It was a request, set value and
+ ** prepare an answer message
+ */
+
+ ncr_setwide (np, cp, wide, 1);
+
+ np->msgout[0] = M_EXTENDED;
+ np->msgout[1] = 2;
+ np->msgout[2] = M_X_WIDE_REQ;
+ np->msgout[3] = wide;
+
+ np->msgin [0] = M_NOOP;
+
+ cp->nego_status = NS_WIDE;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "wide msgout", np->msgout);
+ }
+
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, wdtr_resp));
+}
+/*==========================================================
+**
+** ncr chip handler for PARALLEL PROTOCOL REQUEST
+** (PPR) message.
+**
+**==========================================================
+**
+** Read comments above.
+**
+**----------------------------------------------------------
+*/
+/*
+** Handle an incoming PPR message: clamp period, offset and width
+** to driver/target/controller limits, enforce the Ultra3/DT
+** consistency rules, program the chip, then either acknowledge
+** (we initiated the negotiation) or send back a PPR response
+** (target-initiated).
+*/
+static void ncr_ppr_nego(ncb_p np, tcb_p tp, ccb_p cp)
+{
+ u_char scntl3, scntl4;
+ u_char chg, ofs, per, fak, wth, dt;
+
+ /*
+ ** PPR message received.
+ */
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "ppr msg in", np->msgin);
+ };
+
+ /*
+ ** get requested values.
+ */
+
+ chg = 0;
+ per = np->msgin[3];
+ ofs = np->msgin[5];
+ wth = np->msgin[6];
+ dt = np->msgin[7];
+ if (ofs==0) per=255;
+
+ /*
+ ** if target sends sync (wide),
+ ** it CAN transfer synch (wide).
+ */
+
+ if (ofs)
+ tp->inq_byte7 |= INQ7_SYNC;
+
+ if (wth)
+ tp->inq_byte7 |= INQ7_WIDE16;
+
+ /*
+ ** check values against driver limits.
+ */
+
+ if (wth > tp->usrwide)
+ {chg = 1; wth = tp->usrwide;}
+ if (per < np->minsync)
+ {chg = 1; per = np->minsync;}
+ if (per < tp->minsync)
+ {chg = 1; per = tp->minsync;}
+ if (ofs > tp->maxoffs)
+ {chg = 1; ofs = tp->maxoffs;}
+
+ /*
+ ** Check against controller limits.
+ */
+ fak = 7;
+ scntl3 = 0;
+ scntl4 = 0;
+ if (ofs != 0) {
+ scntl4 = dt ? 0x80 : 0;
+ ncr_getsync(np, per, &fak, &scntl3);
+ if (fak > 7) {
+ chg = 1;
+ ofs = 0;
+ }
+ }
+ if (ofs == 0) {
+ fak = 7;
+ per = 0;
+ scntl3 = 0;
+ scntl4 = 0;
+ tp->minsync = 0;
+ }
+
+ /*
+ ** If target responds with Ultra 3 speed
+ ** but narrow or not DT, reject.
+ ** If target responds with DT request
+ ** but not Ultra3 speeds, reject message,
+ ** reset min sync for target to 0x0A and
+ ** set flags to re-negotiate.
+ */
+
+ if ((per == 0x09) && ofs && (!wth || !dt))
+ chg = 1;
+ else if (( (per > 0x09) && dt) )
+ chg = 2;
+
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printk ("ppr: wth=%d per=%d scntl3=0x%x scntl4=0x%x ofs=%d fak=%d chg=%d.\n",
+ wth, per, scntl3, scntl4, ofs, fak, chg);
+ }
+
+ if (INB (HS_PRT) == HS_NEGOTIATE) {
+ OUTB (HS_PRT, HS_BUSY);
+ switch (cp->nego_status) {
+ case NS_PPR:
+ /*
+ ** This was an answer message
+ */
+ if (chg) {
+ /*
+ ** Answer wasn't acceptable.
+ */
+ if (chg == 2) {
+ /* Send message reject and reset flags for
+ ** host to re-negotiate with min period 0x0A.
+ */
+ tp->minsync = 0x0A;
+ tp->period = 0;
+ tp->widedone = 0;
+ }
+ ncr_setsyncwide (np, cp, 0, 0xe0, 0, 0);
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+ } else {
+ /*
+ ** Answer is ok.
+ */
+
+ if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+ ncr_setsyncwide (np, cp, scntl3, (fak<<5)|ofs,0, wth);
+ else
+ ncr_setsyncwide (np, cp, scntl3, ofs, scntl4, wth);
+
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+
+ };
+ return;
+
+ case NS_SYNC:
+ ncr_setsync (np, cp, 0, 0xe0, 0);
+ break;
+
+ case NS_WIDE:
+ ncr_setwide (np, cp, 0, 0);
+ break;
+ };
+ };
+
+ /*
+ ** It was a request. Set value and
+ ** prepare an answer message
+ **
+ ** If narrow or not DT and requesting Ultra3
+ ** slow the bus down and force ST. If not
+ ** requesting Ultra3, force ST.
+ ** Max offset is 31=0x1f if ST mode.
+ */
+
+ if ((per == 0x09) && ofs && (!wth || !dt)) {
+ per = 0x0A;
+ dt = 0;
+ ofs &= 0x1f;
+ }
+ else if ( (per > 0x09) && dt) {
+ dt = 0;
+ ofs &= 0x1f;
+ }
+
+ if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+ ncr_setsyncwide (np, cp, scntl3, (fak<<5)|ofs,0, wth);
+ else
+ ncr_setsyncwide (np, cp, scntl3, ofs, scntl4, wth);
+
+ np->msgout[0] = M_EXTENDED;
+ np->msgout[1] = 6;
+ np->msgout[2] = M_X_PPR_REQ;
+ np->msgout[3] = per;
+ np->msgout[4] = 0;
+ np->msgout[5] = ofs;
+ np->msgout[6] = wth;
+ np->msgout[7] = dt;
+
+ cp->nego_status = NS_PPR;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "ppr msgout", np->msgout);
+ }
+
+ np->msgin [0] = M_NOOP;
+
+ if (!ofs)
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+ else
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, ppr_resp));
+}
+
+
+
+/*
+** Reset SYNC or WIDE to default settings.
+** Called when a negotiation does not succeed either
+** on rejection or on protocol error.
+*/
+static void ncr_nego_default(ncb_p np, tcb_p tp, ccb_p cp)
+{
+ /*
+ ** any error in negotiation:
+ ** fall back to default mode.
+ */
+ switch (cp->nego_status) {
+
+ case NS_SYNC:
+ ncr_setsync (np, cp, 0, 0xe0, 0);
+ break;
+
+ case NS_WIDE:
+ ncr_setwide (np, cp, 0, 0);
+ break;
+
+ case NS_PPR:
+ /*
+ * ppr_negotiation is set to 1 on the first ppr nego command.
+ * If ppr is successful, it is reset to 2.
+ * If unsuccessful it is reset to 0.
+ */
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ /* NOTE(review): this local 'tp' shadows the 'tp'
+ ** parameter of the same name for this debug block.
+ */
+ tcb_p tp=&np->target[cp->target];
+ u_char factor, offset, width;
+
+ ncr_get_xfer_info ( np, tp, &factor, &offset, &width);
+
+ printk("Current factor %d offset %d width %d\n",
+ factor, offset, width);
+ }
+ if (tp->ppr_negotiation == 2)
+ ncr_setsyncwide (np, cp, 0, 0xe0, 0, 0);
+ else if (tp->ppr_negotiation == 1) {
+
+ /* First ppr command has received a M REJECT.
+ * Do not change the existing wide/sync parameter
+ * values (asyn/narrow if this as the first nego;
+ * may be different if target initiates nego.).
+ */
+ tp->ppr_negotiation = 0;
+ }
+ else
+ {
+ tp->ppr_negotiation = 0;
+ ncr_setwide (np, cp, 0, 0);
+ }
+ break;
+ };
+ np->msgin [0] = M_NOOP;
+ np->msgout[0] = M_NOOP;
+ cp->nego_status = 0;
+}
+
+/*==========================================================
+**
+** ncr chip handler for MESSAGE REJECT received for
+** a WIDE or SYNCHRONOUS negotiation.
+**
+** clear the PPR negotiation flag, all future nego.
+** will be SDTR and WDTR
+**
+**==========================================================
+**
+** Read comments above.
+**
+**----------------------------------------------------------
+*/
+static void ncr_nego_rejected(ncb_p np, tcb_p tp, ccb_p cp)
+{
+ /* Fall back to default transfer settings and clear the
+ ** HS_NEGOTIATE host status so the nexus resumes normally.
+ */
+ ncr_nego_default(np, tp, cp);
+ OUTB (HS_PRT, HS_BUSY);
+}
+
+
+/*==========================================================
+**
+**
+** ncr chip exception handler for programmed interrupts.
+**
+**
+**==========================================================
+*/
+
+/*
+** Dispatch a programmed interrupt (SIR) raised by the SCRIPTS
+** processor: task-recovery events, reselection anomalies,
+** message handling (extended messages, negotiation, reject),
+** and data over/underrun bookkeeping.  Resumes the SCRIPTS
+** processor unless the handler takes over (returns directly).
+*/
+void ncr_int_sir (ncb_p np)
+{
+ u_char num = INB (nc_dsps);
+ u_long dsa = INL (nc_dsa);
+ ccb_p cp = ncr_ccb_from_dsa(np, dsa);
+ u_char target = INB (nc_sdid) & 0x0f;
+ tcb_p tp = &np->target[target];
+ int tmp;
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("I#%d", num);
+
+ switch (num) {
+ /*
+ ** See comments in the SCRIPTS code.
+ */
+#ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR
+ case SIR_DUMMY_INTERRUPT:
+ goto out;
+#endif
+
+ /*
+ ** The C code is currently trying to recover from something.
+ ** Typically, user want to abort some command.
+ */
+ case SIR_SCRIPT_STOPPED:
+ case SIR_TARGET_SELECTED:
+ case SIR_ABORT_SENT:
+ case SIR_AUTO_SENSE_DONE:
+ ncr_sir_task_recovery(np, num);
+ return;
+ /*
+ ** The device didn't go to MSG OUT phase after having
+ ** been selected with ATN. We do not want to handle
+ ** that.
+ */
+ case SIR_SEL_ATN_NO_MSG_OUT:
+ printk ("%s:%d: No MSG OUT phase after selection with ATN.\n",
+ ncr_name (np), target);
+ goto out_stuck;
+ /*
+ ** The device didn't switch to MSG IN phase after
+ ** having reselected the initiator.
+ */
+ case SIR_RESEL_NO_MSG_IN:
+ /*
+ ** After reselection, the device sent a message that wasn't
+ ** an IDENTIFY.
+ */
+ case SIR_RESEL_NO_IDENTIFY:
+ /*
+ ** If devices reselecting without sending an IDENTIFY
+ ** message still exist, this should help.
+ ** We just assume lun=0, 1 CCB, no tag.
+ */
+ if (tp->l0p) {
+ OUTL (nc_dsa, scr_to_cpu(tp->l0p->tasktbl[0]));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, resel_go));
+ return;
+ }
+ /* fall through */
+ /*
+ ** The device reselected a LUN we do not know of.
+ */
+ case SIR_RESEL_BAD_LUN:
+ np->msgout[0] = M_RESET;
+ goto out;
+ /*
+ ** The device reselected for an untagged nexus and we
+ ** haven't any.
+ */
+ case SIR_RESEL_BAD_I_T_L:
+ np->msgout[0] = M_ABORT;
+ goto out;
+ /*
+ ** The device reselected for a tagged nexus that we do not
+ ** have.
+ */
+ case SIR_RESEL_BAD_I_T_L_Q:
+ np->msgout[0] = M_ABORT_TAG;
+ goto out;
+ /*
+ ** The SCRIPTS let us know that the device has grabbed
+ ** our message and will abort the job.
+ */
+ case SIR_RESEL_ABORTED:
+ np->lastmsg = np->msgout[0];
+ np->msgout[0] = M_NOOP;
+ printk ("%s:%d: message %x sent on bad reselection.\n",
+ ncr_name (np), target, np->lastmsg);
+ goto out;
+ /*
+ ** The SCRIPTS let us know that a message has been
+ ** successfully sent to the device.
+ */
+ case SIR_MSG_OUT_DONE:
+ np->lastmsg = np->msgout[0];
+ np->msgout[0] = M_NOOP;
+ /* Should we really care of that */
+ if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
+ if (cp) {
+ cp->xerr_status &= ~XE_PARITY_ERR;
+ if (!cp->xerr_status)
+ OUTOFFB (HF_PRT, HF_EXT_ERR);
+ }
+ }
+ goto out;
+ /*
+ ** The device didn't send a GOOD SCSI status.
+ ** We may have some work to do prior to allow
+ ** the SCRIPTS processor to continue.
+ */
+ case SIR_BAD_STATUS:
+ if (!cp)
+ goto out;
+ ncr_sir_to_redo(np, num, cp);
+ return;
+ /*
+ ** We are asked by the SCRIPTS to prepare a
+ ** REJECT message.
+ */
+ case SIR_REJECT_TO_SEND:
+ ncr_print_msg(cp, "M_REJECT to send for ", np->msgin);
+ np->msgout[0] = M_REJECT;
+ goto out;
+ /*
+ ** We have been ODD at the end of a DATA IN
+ ** transfer and the device didn't send a
+ ** IGNORE WIDE RESIDUE message.
+ ** It is a data overrun condition.
+ */
+ case SIR_SWIDE_OVERRUN:
+ if (cp) {
+ OUTONB (HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_SWIDE_OVRUN;
+ }
+ goto out;
+ /*
+ ** We have been ODD at the end of a DATA OUT
+ ** transfer.
+ ** It is a data underrun condition.
+ */
+ case SIR_SODL_UNDERRUN:
+ if (cp) {
+ OUTONB (HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_SODL_UNRUN;
+ }
+ goto out;
+ /*
+ ** The device wants us to transfer more data than
+ ** expected or in the wrong direction.
+ ** The number of extra bytes is in scratcha.
+ ** It is a data overrun condition.
+ */
+ case SIR_DATA_OVERRUN:
+ if (cp) {
+ OUTONB (HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_EXTRA_DATA;
+ cp->extra_bytes += INL (nc_scratcha);
+ }
+ goto out;
+ /*
+ ** The device switched to an illegal phase (4/5).
+ */
+ case SIR_BAD_PHASE:
+ if (cp) {
+ OUTONB (HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_BAD_PHASE;
+ }
+ goto out;
+ /*
+ ** We received a message.
+ */
+ case SIR_MSG_RECEIVED:
+ if (!cp)
+ goto out_stuck;
+ switch (np->msgin [0]) {
+ /*
+ ** We received an extended message.
+ ** We handle MODIFY DATA POINTER, SDTR, WDTR
+ ** and reject all other extended messages.
+ */
+ case M_EXTENDED:
+ switch (np->msgin [2]) {
+ case M_X_MODIFY_DP:
+ if (DEBUG_FLAGS & DEBUG_POINTER)
+ ncr_print_msg(cp,"modify DP",np->msgin);
+ tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
+ (np->msgin[5]<<8) + (np->msgin[6]);
+ ncr_modify_dp(np, tp, cp, tmp);
+ return;
+ case M_X_SYNC_REQ:
+ ncr_sync_nego(np, tp, cp);
+ return;
+ case M_X_WIDE_REQ:
+ ncr_wide_nego(np, tp, cp);
+ return;
+ case M_X_PPR_REQ:
+ ncr_ppr_nego(np, tp, cp);
+ return;
+ default:
+ goto out_reject;
+ }
+ break;
+ /*
+ ** We received a 1/2 byte message not handled from SCRIPTS.
+ ** We are only expecting MESSAGE REJECT and IGNORE WIDE
+ ** RESIDUE messages that haven't been anticipated by
+ ** SCRIPTS on SWIDE full condition. Unanticipated IGNORE
+ ** WIDE RESIDUE messages are aliased as MODIFY DP (-1).
+ */
+ case M_IGN_RESIDUE:
+ if (DEBUG_FLAGS & DEBUG_POINTER)
+ ncr_print_msg(cp,"ign wide residue", np->msgin);
+ ncr_modify_dp(np, tp, cp, -1);
+ return;
+ case M_REJECT:
+ if (INB (HS_PRT) == HS_NEGOTIATE)
+ ncr_nego_rejected(np, tp, cp);
+ else {
+ PRINT_ADDR(cp->cmd);
+ printk ("M_REJECT received (%x:%x).\n",
+ scr_to_cpu(np->lastmsg), np->msgout[0]);
+ }
+ goto out_clrack;
+ break;
+ default:
+ goto out_reject;
+ }
+ break;
+ /*
+ ** We received an unknown message.
+ ** Ignore all MSG IN phases and reject it.
+ */
+ case SIR_MSG_WEIRD:
+ ncr_print_msg(cp, "WEIRD message received", np->msgin);
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_weird));
+ return;
+ /*
+ ** Negotiation failed.
+ ** Target does not send us the reply.
+ ** Remove the HS_NEGOTIATE status.
+ */
+ case SIR_NEGO_FAILED:
+ OUTB (HS_PRT, HS_BUSY);
+ /* fall through */
+ /*
+ ** Negotiation failed.
+ ** Target does not want answer message.
+ */
+ case SIR_NEGO_PROTO:
+ ncr_nego_default(np, tp, cp);
+ goto out;
+ };
+
+out:
+ OUTONB_STD ();
+ return;
+out_reject:
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+ return;
+out_clrack:
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+ return;
+out_stuck:
+ return;
+}
+
+
+/*==========================================================
+**
+**
+** Acquire a control block
+**
+**
+**==========================================================
+*/
+
+/*
+** Acquire a free CCB for target 'tn' / lun 'ln', allocating a
+** new one when the free queue is empty and reserving a tag when
+** tagged queuing is active for the LUN.
+** Returns NULL when no CCB (or no tag) can be obtained.
+*/
+static ccb_p ncr_get_ccb (ncb_p np, u_char tn, u_char ln)
+{
+ tcb_p tp = &np->target[tn];
+ lcb_p lp = ncr_lp(np, tp, ln);
+ u_short tag = NO_TAG;
+ XPT_QUEHEAD *qp;
+ ccb_p cp = (ccb_p) 0;
+
+ /*
+ ** Allocate a new CCB if needed.
+ */
+ if (xpt_que_empty(&np->free_ccbq))
+ (void) ncr_alloc_ccb(np);
+
+ /*
+ ** Look for a free CCB
+ */
+ qp = xpt_remque_head(&np->free_ccbq);
+ if (!qp)
+ goto out;
+ cp = xpt_que_entry(qp, struct ccb, link_ccbq);
+
+ /*
+ ** If the LCB is not yet available and we already
+ ** have queued a CCB for a LUN without LCB,
+ ** give up. Otherwise all is fine. :-)
+ */
+ if (!lp) {
+ if (xpt_que_empty(&np->b0_ccbq))
+ xpt_insque_head(&cp->link_ccbq, &np->b0_ccbq);
+ else
+ goto out_free;
+ } else {
+ /*
+ ** Tune tag mode if asked by user.
+ */
+ if (lp->queuedepth != lp->numtags) {
+ ncr_setup_tags(np, tn, ln);
+ }
+
+ /*
+ ** Get a tag for this nexus if required.
+ ** Keep from using more tags than we can handle.
+ */
+ if (lp->usetags) {
+ if (lp->busyccbs < lp->maxnxs) {
+ tag = lp->cb_tags[lp->ia_tag];
+ ++lp->ia_tag;
+ if (lp->ia_tag == MAX_TAGS)
+ lp->ia_tag = 0;
+ cp->tags_si = lp->tags_si;
+ ++lp->tags_sum[cp->tags_si];
+ }
+ else
+ goto out_free;
+ }
+
+ /*
+ ** Put the CCB in the LUN wait queue and
+ ** count it as busy.
+ */
+ xpt_insque_tail(&cp->link_ccbq, &lp->wait_ccbq);
+ ++lp->busyccbs;
+ }
+
+ /*
+ ** Remember all the information needed to free this CCB.
+ */
+ cp->to_abort = 0;
+ cp->tag = tag;
+ cp->target = tn;
+ cp->lun = ln;
+
+ if (DEBUG_FLAGS & DEBUG_TAGS) {
+ PRINT_LUN(np, tn, ln);
+ printk ("ccb @%p using tag %d.\n", cp, tag);
+ }
+
+out:
+ return cp;
+out_free:
+ xpt_insque_head(&cp->link_ccbq, &np->free_ccbq);
+ return (ccb_p) 0;
+}
+
+/*==========================================================
+**
+**
+** Release one control block
+**
+**
+**==========================================================
+*/
+
+ /*
+ ** Release a CCB: return its tag (if any) and task-table slot to
+ ** the lun control block, then put the CCB back on the free queue.
+ */
+ static void ncr_free_ccb (ncb_p np, ccb_p cp)
+ {
+ tcb_p tcbp = &np->target[cp->target];
+ lcb_p lcbp = ncr_lp(np, tcbp, cp->lun);
+
+ if (DEBUG_FLAGS & DEBUG_TAGS) {
+ PRINT_LUN(np, cp->target, cp->lun);
+ printk ("ccb @%p freeing tag %d.\n", cp, cp->tag);
+ }
+
+ /*
+ ** If a lun control block exists, make the task slot and
+ ** the tag available again, and decrement busy counters.
+ */
+ if (lcbp) {
+ if (cp->tag == NO_TAG) {
+ lcbp->tasktbl[0] = cpu_to_scr(np->p_bad_i_t_l);
+ } else {
+ /* Return the tag to the circular free-tag ring. */
+ lcbp->cb_tags[lcbp->if_tag] = cp->tag;
+ if (++lcbp->if_tag == MAX_TAGS)
+ lcbp->if_tag = 0;
+ --lcbp->tags_sum[cp->tags_si];
+ lcbp->tasktbl[cp->tag] = cpu_to_scr(np->p_bad_i_t_l_q);
+ }
+ --lcbp->busyccbs;
+ if (cp->queued)
+ --lcbp->queuedccbs;
+ }
+
+ /*
+ ** Make this CCB available again.
+ */
+ xpt_remque(&cp->link_ccbq);
+ xpt_insque_head(&cp->link_ccbq, &np->free_ccbq);
+ cp->host_status = HS_IDLE;
+ cp->queued = 0;
+ }
+
+/*------------------------------------------------------------------------
+** Allocate a CCB and initialize its fixed part.
+**------------------------------------------------------------------------
+**------------------------------------------------------------------------
+*/
+ /*
+ ** Allocate one CCB from DMA-able memory, link it into the hashed
+ ** CCB list, the wakeup list and the free queue, and preset its
+ ** SCRIPTS start/restart addresses. Returns NULL on allocation failure.
+ */
+ static ccb_p ncr_alloc_ccb(ncb_p np)
+ {
+ ccb_p cp = 0;
+ int hcode;
+
+ /*
+ ** Allocate memory for this CCB.
+ */
+ cp = m_calloc_dma(sizeof(struct ccb), "CCB");
+ if (!cp)
+ return 0;
+
+ /*
+ ** Count it and initialize it.
+ */
+ np->actccbs++;
+
+ /*
+ ** Remember virtual and bus address of this ccb.
+ */
+ cp->p_ccb = vtobus(cp);
+
+ /*
+ ** Insert this ccb into the hashed list
+ ** (keyed by its bus address, for DSA lookup).
+ */
+ hcode = CCB_HASH_CODE(cp->p_ccb);
+ cp->link_ccbh = np->ccbh[hcode];
+ np->ccbh[hcode] = cp;
+
+ /*
+ ** Initialize the start and restart actions.
+ */
+ cp->phys.header.go.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ cp->phys.header.go.restart = cpu_to_scr(NCB_SCRIPTH_PHYS(np,bad_i_t_l));
+
+ /*
+ ** Initialize some other fields.
+ */
+ cp->phys.smsg_ext.addr = cpu_to_scr(NCB_PHYS(np, msgin[2]));
+
+ /*
+ ** Chain into wakeup list and free ccb queue.
+ */
+ cp->link_ccb = np->ccbc;
+ np->ccbc = cp;
+
+ xpt_insque_head(&cp->link_ccbq, &np->free_ccbq);
+
+ return cp;
+ }
+
+/*------------------------------------------------------------------------
+** Look up a CCB from a DSA value.
+**------------------------------------------------------------------------
+**------------------------------------------------------------------------
+*/
+ /*
+ ** Map a DSA (CCB bus address) back to its CCB by walking the
+ ** corresponding hash chain. Returns NULL when not found.
+ */
+ static ccb_p ncr_ccb_from_dsa(ncb_p np, u_long dsa)
+ {
+ ccb_p cp;
+
+ for (cp = np->ccbh[CCB_HASH_CODE(dsa)]; cp; cp = cp->link_ccbh) {
+ if (cp->p_ccb == dsa)
+ break;
+ }
+
+ return cp;
+ }
+
+/*==========================================================
+**
+**
+** Allocation of resources for Targets/Luns/Tags.
+**
+**
+**==========================================================
+*/
+
+
+/*------------------------------------------------------------------------
+** Target control block initialisation.
+**------------------------------------------------------------------------
+** This data structure is fully initialized after a SCSI command
+** has been successfully completed for this target.
+**------------------------------------------------------------------------
+*/
+ /*
+ ** Target control block initialization: verify the alignments the
+ ** chip requires between register offsets and their tcb mirrors
+ ** (each pair must share the same position within a 32-bit word).
+ */
+ static void ncr_init_tcb (ncb_p np, u_char tn)
+ {
+ int is_c1010 =
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66);
+
+ assert (( (offsetof(struct ncr_reg, nc_sxfer) ^
+ offsetof(struct tcb , sval )) &3) == 0);
+ assert (( (offsetof(struct ncr_reg, nc_scntl3) ^
+ offsetof(struct tcb , wval )) &3) == 0);
+
+ /* The C1010 family additionally mirrors scntl4 in uval. */
+ if (is_c1010) {
+ assert (( (offsetof(struct ncr_reg, nc_scntl4) ^
+ offsetof(struct tcb , uval )) &3) == 0);
+ }
+ }
+
+/*------------------------------------------------------------------------
+** Lun control block allocation and initialization.
+**------------------------------------------------------------------------
+** This data structure is allocated and initialized after a SCSI
+** command has been successfully completed for this target/lun.
+**------------------------------------------------------------------------
+*/
+ /*
+ ** Allocate and initialize the lun control block for (tn, ln).
+ ** Idempotent: returns the existing LCB when already allocated.
+ ** Returns NULL on allocation failure (partially allocated target
+ ** tables are kept in the tcb and reused on a later attempt).
+ */
+ static lcb_p ncr_alloc_lcb (ncb_p np, u_char tn, u_char ln)
+ {
+ tcb_p tp = &np->target[tn];
+ lcb_p lp = ncr_lp(np, tp, ln);
+
+ /*
+ ** Already done, return.
+ */
+ if (lp)
+ return lp;
+
+ /*
+ ** Initialize the target control block if not yet.
+ */
+ ncr_init_tcb(np, tn);
+
+ /*
+ ** Allocate the lcb bus address array (only needed for LUN > 0):
+ ** 64 entries of 4 bytes each = 256 bytes, preset to the
+ ** 'resel_badlun' SCRIPTS address.
+ ** Compute the bus address of this table.
+ */
+ if (ln && !tp->luntbl) {
+ int i;
+
+ tp->luntbl = m_calloc_dma(256, "LUNTBL");
+ if (!tp->luntbl)
+ goto fail;
+ for (i = 0 ; i < 64 ; i++)
+ tp->luntbl[i] = cpu_to_scr(NCB_PHYS(np, resel_badlun));
+ tp->b_luntbl = cpu_to_scr(vtobus(tp->luntbl));
+ }
+
+ /*
+ ** Allocate the table of pointers for LUN(s) > 0, if needed.
+ */
+ if (ln && !tp->lmp) {
+ tp->lmp = m_calloc(MAX_LUN * sizeof(lcb_p), "LMP");
+ if (!tp->lmp)
+ goto fail;
+ }
+
+ /*
+ ** Allocate the lcb.
+ ** Make it available to the chip (LUN 0 is stored directly
+ ** in the tcb, other LUNs go through the luntbl/lmp tables).
+ */
+ lp = m_calloc_dma(sizeof(struct lcb), "LCB");
+ if (!lp)
+ goto fail;
+ if (ln) {
+ tp->lmp[ln] = lp;
+ tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
+ }
+ else {
+ tp->l0p = lp;
+ tp->b_lun0 = cpu_to_scr(vtobus(lp));
+ }
+
+ /*
+ ** Initialize the CCB queue headers.
+ */
+ xpt_que_init(&lp->busy_ccbq);
+ xpt_que_init(&lp->wait_ccbq);
+
+ /*
+ ** Set max CCBs to 1 and use the default task array
+ ** by default (untagged operation until INQUIRY says otherwise).
+ */
+ lp->maxnxs = 1;
+ lp->tasktbl = &lp->tasktbl_0;
+ lp->b_tasktbl = cpu_to_scr(vtobus(lp->tasktbl));
+ lp->tasktbl[0] = cpu_to_scr(np->p_notask);
+ lp->resel_task = cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_notag));
+
+ /*
+ ** Initialize command queuing control.
+ */
+ lp->busyccbs = 1;
+ lp->queuedccbs = 1;
+ lp->queuedepth = 1;
+ fail:
+ return lp;
+ }
+
+
+/*------------------------------------------------------------------------
+** Lun control block setup on INQUIRY data received.
+**------------------------------------------------------------------------
+** We only support WIDE, SYNC for targets and CMDQ for logical units.
+** This setup is done on each INQUIRY since we are expecting user
+** will play with CHANGE DEFINITION commands. :-)
+**------------------------------------------------------------------------
+*/
+ /*
+ ** Set up the lun control block from freshly received INQUIRY data.
+ ** Allocates the LCB if needed, derives SYNC/WIDE/CMDQ capabilities
+ ** from INQUIRY byte 7, and (re)starts negotiation / tag setup when
+ ** capabilities changed. Returns the LCB or NULL on failure.
+ */
+ static lcb_p ncr_setup_lcb (ncb_p np, u_char tn, u_char ln, u_char *inq_data)
+ {
+ tcb_p tp = &np->target[tn];
+ lcb_p lp = ncr_lp(np, tp, ln);
+ u_char inq_byte7;
+ int i;
+
+ /*
+ ** If no lcb, try to allocate it.
+ */
+ if (!lp && !(lp = ncr_alloc_lcb(np, tn, ln)))
+ goto fail;
+
+ #if 0 /* No more used. Left here as provision */
+ /*
+ ** Get device quirks.
+ */
+ tp->quirks = 0;
+ if (tp->quirks && bootverbose) {
+ PRINT_LUN(np, tn, ln);
+ printk ("quirks=%x.\n", tp->quirks);
+ }
+ #endif
+
+ /*
+ ** Evaluate trustable target/unit capabilities.
+ ** We only believe device version >= SCSI-2 that
+ ** use appropriate response data format (2).
+ ** But it seems that some CCS devices also
+ ** support SYNC and I donnot want to frustrate
+ ** anybody. ;-)
+ */
+ inq_byte7 = 0;
+ if ((inq_data[2] & 0x7) >= 2 && (inq_data[3] & 0xf) == 2)
+ inq_byte7 = inq_data[7];
+ else if ((inq_data[2] & 0x7) == 1 && (inq_data[3] & 0xf) == 1)
+ inq_byte7 = INQ7_SYNC;
+
+ /*
+ ** Throw away announced LUN capabilities if we are told
+ ** that there is no real device supported by the logical unit.
+ ** (peripheral qualifier > 1, or device type 0x1f = unknown)
+ */
+ if ((inq_data[0] & 0xe0) > 0x20 || (inq_data[0] & 0x1f) == 0x1f)
+ inq_byte7 &= (INQ7_SYNC | INQ7_WIDE16);
+
+ /*
+ ** If user is wanting SYNC, force this feature.
+ */
+ if (driver_setup.force_sync_nego)
+ inq_byte7 |= INQ7_SYNC;
+
+ /*
+ ** Prepare negotiation if SIP capabilities have changed.
+ */
+ tp->inq_done = 1;
+ if ((inq_byte7 ^ tp->inq_byte7) & (INQ7_SYNC | INQ7_WIDE16)) {
+ tp->inq_byte7 = inq_byte7;
+ ncr_negotiate(np, tp);
+ }
+
+ /*
+ ** If unit supports tagged commands, allocate and
+ ** initialize the task table if not yet.
+ ** NOTE(review): if the cb_tags allocation below fails, the
+ ** enlarged tasktbl is kept but maxnxs stays at its old value,
+ ** so tagged mode is simply not enabled — verify intended.
+ */
+ if ((inq_byte7 & INQ7_QUEUE) && lp->tasktbl == &lp->tasktbl_0) {
+ lp->tasktbl = m_calloc_dma(MAX_TASKS*4, "TASKTBL");
+ if (!lp->tasktbl) {
+ lp->tasktbl = &lp->tasktbl_0;
+ goto fail;
+ }
+ lp->b_tasktbl = cpu_to_scr(vtobus(lp->tasktbl));
+ for (i = 0 ; i < MAX_TASKS ; i++)
+ lp->tasktbl[i] = cpu_to_scr(np->p_notask);
+
+ lp->cb_tags = m_calloc(MAX_TAGS, "CB_TAGS");
+ if (!lp->cb_tags)
+ goto fail;
+ for (i = 0 ; i < MAX_TAGS ; i++)
+ lp->cb_tags[i] = i;
+
+ lp->maxnxs = MAX_TAGS;
+ lp->tags_stime = ktime_get(3*HZ);
+ }
+
+ /*
+ ** Adjust tagged queueing status if needed.
+ */
+ if ((inq_byte7 ^ lp->inq_byte7) & INQ7_QUEUE) {
+ lp->inq_byte7 = inq_byte7;
+ lp->numtags = lp->maxtags;
+ ncr_setup_tags (np, tn, ln);
+ }
+
+ fail:
+ return lp;
+ }
+
+/*==========================================================
+**
+**
+** Build Scatter Gather Block
+**
+**
+**==========================================================
+**
+** The transfer area may be scattered among
+** several non adjacent physical pages.
+**
+** We may use MAX_SCATTER blocks.
+**
+**----------------------------------------------------------
+*/
+
+/*
+** We try to reduce the number of interrupts caused
+** by unexpected phase changes due to disconnects.
+** A typical harddisk may disconnect before ANY block.
+** If we wanted to avoid unexpected phase changes at all
+** we had to use a break point every 512 bytes.
+** Of course the number of scatter/gather blocks is
+** limited.
+** Under Linux, the scatter/gatter blocks are provided by
+** the generic driver. We just have to copy addresses and
+** sizes to the data segment array.
+*/
+
+/*
+** For 64 bit systems, we use the 8 upper bits of the size field
+** to provide bus address bits 32-39 to the SCRIPTS processor.
+** This allows the 895A and 896 to address up to 1 TB of memory.
+** For 32 bit chips on 64 bit systems, we must be provided with
+** memory addresses that fit into the first 32 bit bus address
+** range and so, this does not matter and we expect an error from
+** the chip if this ever happen.
+**
+** We use a separate function for the case Linux does not provide
+** a scatter list in order to allow better code optimization
+** for the case we have a scatter list (BTW, for now this just wastes
+** about 40 bytes of code for x86, but my guess is that the scatter
+** code will get more complex later).
+*/
+
+#ifdef SCSI_NCR_USE_64BIT_DAC
+#define SCATTER_ONE(data, badd, len) \
+ (data)->addr = cpu_to_scr(badd); \
+ (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len);
+#else
+#define SCATTER_ONE(data, badd, len) \
+ (data)->addr = cpu_to_scr(badd); \
+ (data)->size = cpu_to_scr(len);
+#endif
+
+#define CROSS_16MB(p, n) (((((u_long) p) + n - 1) ^ ((u_long) p)) & ~0xffffff)
+
+ /*
+ ** Build the scatter table for a command that has no scatter list:
+ ** at most one data segment, stored in the LAST slot of the table
+ ** (the SCRIPTS processor consumes entries from the end).
+ ** Returns the number of segments used (0 or 1).
+ */
+ static int ncr_scatter_no_sglist(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
+ {
+ struct scr_tblmove *data = &cp->phys.data[MAX_SCATTER-1];
+ int segment;
+
+ cp->data_len = cmd->request_bufflen;
+
+ if (cmd->request_bufflen) {
+ u_long baddr = map_scsi_single_data(np, cmd);
+
+ SCATTER_ONE(data, baddr, cmd->request_bufflen);
+ /* Flag 16 MB boundary crossings for the 896 rev.1 work-around. */
+ if (CROSS_16MB(baddr, cmd->request_bufflen)) {
+ cp->host_flags |= HF_PM_TO_C;
+ #ifdef DEBUG_896R1
+ printk("He! we are crossing a 16 MB boundary (0x%lx, 0x%x)\n",
+ baddr, cmd->request_bufflen);
+ #endif
+ }
+ segment = 1;
+ }
+ else
+ segment = 0;
+
+ return segment;
+ }
+
+/*
+** DEL 472 - 53C896 Rev 1 - Part Number 609-0393055 - ITEM 5.
+**
+** We disable data phase mismatch handling from SCRIPTS for data
+** transfers that contains scatter/gather entries that cross
+** a 16 MB boundary.
+** We use a different scatter function for 896 rev. 1 that needs
+** such a work-around. Doing so, we do not affect performance for
+** other chips.
+** This problem should not be triggered for disk IOs under Linux,
+** since such IOs are performed using pages and buffers that are
+** nicely power-of-two sized and aligned. But, since this may change
+** at any time, a work-around was required.
+*/
+ /*
+ ** Scatter routine for the 53C896 rev.1 (DEL 472 work-around):
+ ** identical to ncr_scatter() but flags any segment crossing a
+ ** 16 MB boundary so data phase mismatch is handled by the C code.
+ ** Returns the segment count, or -1 when the list is too long.
+ */
+ static int ncr_scatter_896R1(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
+ {
+ int segn;
+ int use_sg = (int) cmd->use_sg;
+
+ cp->data_len = 0;
+
+ if (!use_sg)
+ segn = ncr_scatter_no_sglist(np, cp, cmd);
+ else if (use_sg > MAX_SCATTER)
+ segn = -1;
+ else {
+ struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
+ struct scr_tblmove *data;
+
+ /* Segments fill the tail of the table, SCRIPTS-style. */
+ use_sg = map_scsi_sg_data(np, cmd);
+ data = &cp->phys.data[MAX_SCATTER - use_sg];
+
+ for (segn = 0; segn < use_sg; segn++) {
+ u_long baddr = scsi_sg_dma_address(&scatter[segn]);
+ unsigned int len = scsi_sg_dma_len(&scatter[segn]);
+
+ SCATTER_ONE(&data[segn],
+ baddr,
+ len);
+ /*
+ ** NOTE(review): the crossing test uses the original
+ ** scatter[segn].length while the entry itself uses the
+ ** DMA length — presumably equal here, but verify.
+ */
+ if (CROSS_16MB(baddr, scatter[segn].length)) {
+ cp->host_flags |= HF_PM_TO_C;
+ #ifdef DEBUG_896R1
+ printk("He! we are crossing a 16 MB boundary (0x%lx, 0x%x)\n",
+ baddr, scatter[segn].length);
+ #endif
+ }
+ cp->data_len += len;
+ }
+ }
+
+ return segn;
+ }
+
+ /*
+ ** Copy the generic driver's scatter/gather list into the CCB's
+ ** data segment array (filled from the end of the table).
+ ** Returns the segment count, or -1 when the list is too long.
+ */
+ static int ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
+ {
+ int segment;
+ int use_sg = (int) cmd->use_sg;
+
+ cp->data_len = 0;
+
+ if (!use_sg)
+ segment = ncr_scatter_no_sglist(np, cp, cmd);
+ else if (use_sg > MAX_SCATTER)
+ segment = -1;
+ else {
+ struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
+ struct scr_tblmove *data;
+
+ use_sg = map_scsi_sg_data(np, cmd);
+ data = &cp->phys.data[MAX_SCATTER - use_sg];
+
+ for (segment = 0; segment < use_sg; segment++) {
+ u_long baddr = scsi_sg_dma_address(&scatter[segment]);
+ unsigned int len = scsi_sg_dma_len(&scatter[segment]);
+
+ SCATTER_ONE(&data[segment],
+ baddr,
+ len);
+ cp->data_len += len;
+ }
+ }
+
+ return segment;
+ }
+
+/*==========================================================
+**
+**
+** Test the pci bus snoop logic :-(
+**
+** Has to be called with interrupts disabled.
+**
+**
+**==========================================================
+*/
+
+#ifndef SCSI_NCR_IOMAPPED
+ /*
+ ** Verify that chip registers are not being cached by writing to a
+ ** read-only register area and reading it back.
+ ** Returns 0 on success, 0x10 on failure.
+ */
+ static int __init ncr_regtest (struct ncb* np)
+ {
+ register volatile u_int32 data;
+ /*
+ ** ncr registers may NOT be cached.
+ ** write 0xffffffff to a read only register area,
+ ** and try to read it back.
+ */
+ data = 0xffffffff;
+ OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data);
+ data = INL_OFF(offsetof(struct ncr_reg, nc_dstat));
+ #if 1
+ if (data == 0xffffffff) {
+ #else
+ if ((data & 0xe2f0fffd) != 0x02000080) {
+ #endif
+ printk ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
+ (unsigned) data);
+ return (0x10);
+ };
+ return (0);
+ }
+#endif
+
+ /*
+ ** Test the PCI bus snoop logic by having the chip exchange values
+ ** with host memory via a small SCRIPTS program and checking that
+ ** both sides observe the exchange.
+ ** Returns 0 on success, or an error bit mask:
+ ** 0x10 register test failed, 0x20 timeout, 0x40 script did not
+ ** terminate at the expected address, 1/2/4 value mismatches.
+ ** Has to be called with interrupts disabled (see banner above).
+ */
+ static int __init ncr_snooptest (struct ncb* np)
+ {
+ u_int32 ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc;
+ int i, err=0;
+ #ifndef SCSI_NCR_IOMAPPED
+ if (np->reg) {
+ err |= ncr_regtest (np);
+ if (err) return (err);
+ }
+ #endif
+ /*
+ ** init
+ */
+ pc = NCB_SCRIPTH0_PHYS (np, snooptest);
+ host_wr = 1;
+ ncr_wr = 2;
+ /*
+ ** Set memory and register.
+ */
+ np->ncr_cache = cpu_to_scr(host_wr);
+ OUTL (nc_temp, ncr_wr);
+ /*
+ ** Start script (exchange values)
+ */
+ OUTL (nc_dsa, np->p_ncb);
+ OUTL_DSP (pc);
+ /*
+ ** Wait 'til done (with timeout)
+ */
+ for (i=0; i<NCR_SNOOP_TIMEOUT; i++)
+ if (INB(nc_istat) & (INTF|SIP|DIP))
+ break;
+ /*
+ ** Save termination position.
+ */
+ pc = INL (nc_dsp);
+ /*
+ ** Read memory and register.
+ */
+ host_rd = scr_to_cpu(np->ncr_cache);
+ ncr_rd = INL (nc_scratcha);
+ ncr_bk = INL (nc_temp);
+
+ /*
+ ** check for timeout
+ */
+ if (i>=NCR_SNOOP_TIMEOUT) {
+ printk ("CACHE TEST FAILED: timeout.\n");
+ return (0x20);
+ };
+ /*
+ ** Check termination position.
+ */
+ if (pc != NCB_SCRIPTH0_PHYS (np, snoopend)+8) {
+ printk ("CACHE TEST FAILED: script execution failed.\n");
+ printk ("start=%08lx, pc=%08lx, end=%08lx\n",
+ (u_long) NCB_SCRIPTH0_PHYS (np, snooptest), (u_long) pc,
+ (u_long) NCB_SCRIPTH0_PHYS (np, snoopend) +8);
+ return (0x40);
+ };
+ /*
+ ** Show results.
+ */
+ if (host_wr != ncr_rd) {
+ printk ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n",
+ (int) host_wr, (int) ncr_rd);
+ err |= 1;
+ };
+ if (host_rd != ncr_wr) {
+ printk ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n",
+ (int) ncr_wr, (int) host_rd);
+ err |= 2;
+ };
+ if (ncr_bk != ncr_wr) {
+ printk ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n",
+ (int) ncr_wr, (int) ncr_bk);
+ err |= 4;
+ };
+ return (err);
+ }
+
+/*==========================================================
+**
+** Determine the ncr's clock frequency.
+** This is essential for the negotiation
+** of the synchronous transfer rate.
+**
+**==========================================================
+**
+** Note: we have to return the correct value.
+** THERE IS NO SAFE DEFAULT VALUE.
+**
+** Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock.
+** 53C860 and 53C875 rev. 1 support fast20 transfers but
+** do not have a clock doubler and so are provided with a
+** 80 MHz clock. All other fast20 boards incorporate a doubler
+** and so should be delivered with a 40 MHz clock.
+** The recent fast40 chips (895/896/895A) and the
+** fast80 chip (C1010) use a 40 Mhz base clock
+** and provide a clock quadrupler (160 Mhz). The code below
+** tries to deal as cleverly as possible with all this stuff.
+**
+**----------------------------------------------------------
+*/
+
+/*
+ * Select NCR SCSI clock frequency
+ */
+ /*
+ * Select NCR SCSI clock frequency: program scntl3 and, when a clock
+ * multiplier (doubler/quadrupler) is present, enable and wait for it
+ * to lock before switching the SCSI core onto the multiplied clock.
+ */
+ static void ncr_selectclock(ncb_p np, u_char scntl3)
+ {
+ if (np->multiplier < 2) {
+ OUTB(nc_scntl3, scntl3);
+ return;
+ }
+
+ if (bootverbose >= 2)
+ printk ("%s: enabling clock multiplier\n", ncr_name(np));
+
+ OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */
+
+ if ( (np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) &&
+ (np->multiplier > 2)) {
+ int i = 20; /* Poll bit 5 of stest4 for quadrupler */
+ while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
+ UDELAY (20);
+ if (!i)
+ printk("%s: the chip cannot lock the frequency\n",
+ ncr_name(np));
+
+ } else /* Wait 120 micro-seconds for multiplier*/
+ UDELAY (120);
+
+ /* The SCSI clock must be halted while it is switched over. */
+ OUTB(nc_stest3, HSC); /* Halt the scsi clock */
+ OUTB(nc_scntl3, scntl3);
+ OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */
+ OUTB(nc_stest3, 0x00); /* Restart scsi clock */
+ }
+
+
+/*
+ * calculate NCR SCSI clock frequency (in KHz)
+ */
+ /*
+ * Calculate the NCR SCSI clock frequency (in KHz) by timing the
+ * chip's general purpose timer at a nominal delay of 1<<gen * 125us
+ * and scaling the measured elapsed time.
+ * Returns 0 when no delay could be measured.
+ */
+ static unsigned __init ncrgetfreq (ncb_p np, int gen)
+ {
+ unsigned int ms = 0;
+ unsigned int f;
+ int count;
+
+ /*
+ * Measure GEN timer delay in order
+ * to calculate SCSI clock frequency
+ *
+ * This code will never execute too
+ * many loop iterations (if DELAY is
+ * reasonably correct). It could get
+ * too low a delay (too high a freq.)
+ * if the CPU is slow executing the
+ * loop for some reason (an NMI, for
+ * example). For this reason we will
+ * if multiple measurements are to be
+ * performed trust the higher delay
+ * (lower frequency returned).
+ */
+ OUTW (nc_sien , 0x0);/* mask all scsi interrupts */
+ /* enable general purpose timer */
+ (void) INW (nc_sist); /* clear pending scsi interrupt */
+ OUTB (nc_dien , 0); /* mask all dma interrupts */
+ (void) INW (nc_sist); /* another one, just to be sure :) */
+ OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */
+ OUTB (nc_stime1, 0); /* disable general purpose timer */
+ OUTB (nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */
+ /* Temporary fix for udelay issue with Alpha
+ platform */
+ while (!(INW(nc_sist) & GEN) && ms++ < 100000) {
+ /* count 1ms */
+ for (count = 0; count < 10; count++)
+ UDELAY (100);
+ }
+ OUTB (nc_stime1, 0); /* disable general purpose timer */
+ /*
+ * set prescaler to divide by whatever 0 means
+ * 0 ought to choose divide by 2, but appears
+ * to set divide by 3.5 mode in my 53c810 ...
+ */
+ OUTB (nc_scntl3, 0);
+
+ /*
+ * adjust for prescaler, and convert into KHz
+ * scale values derived empirically. C1010 uses
+ * different dividers
+ */
+ #if 0
+ if (np->device_id == PCI_DEVICE_ID_LSI_53C1010)
+ f = ms ? ((1 << gen) * 2866 ) / ms : 0;
+ else
+ #endif
+ f = ms ? ((1 << gen) * 4340) / ms : 0;
+
+ if (bootverbose >= 2)
+ printk ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
+ ncr_name(np), gen, ms, f);
+
+ return f;
+ }
+
+ /*
+ * Measure the SCSI clock frequency: discard a warm-up measurement,
+ * then take two more and keep the lower frequency (i.e. trust the
+ * higher, safer delay measurement).
+ */
+ static unsigned __init ncr_getfreq (ncb_p np)
+ {
+ u_int first, second;
+ int gen = 11;
+
+ (void) ncrgetfreq (np, gen); /* throw away first result */
+ first = ncrgetfreq (np, gen);
+ second = ncrgetfreq (np, gen);
+ return (first < second) ? first : second;
+ }
+
+/*
+ * Get/probe NCR SCSI clock frequency
+ */
+ /*
+ * Get/probe the NCR SCSI clock frequency and store it (in KHz) in
+ * np->clock_khz, together with the detected multiplier factor.
+ * Sources, in order of preference: C1010 assumption, measurement
+ * via the general purpose timer, or the BIOS scntl3 setting.
+ */
+ static void __init ncr_getclock (ncb_p np, int mult)
+ {
+ unsigned char scntl3 = np->sv_scntl3;
+ unsigned char stest1 = np->sv_stest1;
+ unsigned f1;
+
+ np->multiplier = 1;
+ f1 = 40000;
+
+ /*
+ ** True with 875/895/896/895A with clock multiplier selected
+ */
+ if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
+ if (bootverbose >= 2)
+ printk ("%s: clock multiplier found\n", ncr_name(np));
+ np->multiplier = mult;
+ }
+
+ /*
+ ** If multiplier not found but a C1010, assume a mult of 4.
+ ** If multiplier not found or scntl3 not 7,5,3,
+ ** reset chip and get frequency from general purpose timer.
+ ** Otherwise trust scntl3 BIOS setting.
+ */
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+ f1=40000;
+ np->multiplier = mult;
+ if (bootverbose >= 2)
+ printk ("%s: clock multiplier assumed\n", ncr_name(np));
+ }
+ else if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
+ OUTB (nc_stest1, 0); /* make sure doubler is OFF */
+ f1 = ncr_getfreq (np);
+
+ if (bootverbose)
+ printk ("%s: NCR clock is %uKHz\n", ncr_name(np), f1);
+
+ /* Snap the measurement to the nearest nominal value. */
+ if (f1 < 55000) f1 = 40000;
+ else f1 = 80000;
+
+ /*
+ ** Suggest to also check the PCI clock frequency
+ ** to make sure our frequency calculation algorithm
+ ** is not too biased.
+ */
+ if (np->features & FE_66MHZ) {
+ np->pciclock_min = (66000*55+80-1)/80;
+ np->pciclock_max = (66000*55)/40;
+ }
+ else {
+ np->pciclock_min = (33000*55+80-1)/80;
+ np->pciclock_max = (33000*55)/40;
+ }
+
+ if (f1 == 40000 && mult > 1) {
+ if (bootverbose >= 2)
+ printk ("%s: clock multiplier assumed\n", ncr_name(np));
+ np->multiplier = mult;
+ }
+ } else {
+ /* Trust the BIOS scntl3 clock divider setting. */
+ if ((scntl3 & 7) == 3) f1 = 40000;
+ else if ((scntl3 & 7) == 5) f1 = 80000;
+ else f1 = 160000;
+
+ f1 /= np->multiplier;
+ }
+
+ /*
+ ** Compute controller synchronous parameters.
+ */
+ f1 *= np->multiplier;
+ np->clock_khz = f1;
+ }
+
+/*
+ * Get/probe PCI clock frequency
+ */
+ /*
+ * Get/probe the PCI clock frequency (KHz) by clocking the SCSI core
+ * from the PCI clock and running the generic frequency measurement.
+ */
+ static u_int __init ncr_getpciclock (ncb_p np)
+ {
+ u_int f; /* plain temporary; was needlessly 'static' */
+
+ OUTB (nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */
+ f = ncr_getfreq (np);
+ OUTB (nc_stest1, 0);
+
+ return f;
+ }
+
+/*===================== LINUX ENTRY POINTS SECTION ==========================*/
+
+#ifndef uchar
+#define uchar unsigned char
+#endif
+
+#ifndef ushort
+#define ushort unsigned short
+#endif
+
+#ifndef ulong
+#define ulong unsigned long
+#endif
+
+/* ---------------------------------------------------------------------
+**
+** Driver setup from the boot command line
+**
+** ---------------------------------------------------------------------
+*/
+
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+#define OPT_TAGS 1
+#define OPT_MASTER_PARITY 2
+#define OPT_SCSI_PARITY 3
+#define OPT_DISCONNECTION 4
+#define OPT_SPECIAL_FEATURES 5
+#define OPT_ULTRA_SCSI 6
+#define OPT_FORCE_SYNC_NEGO 7
+#define OPT_REVERSE_PROBE 8
+#define OPT_DEFAULT_SYNC 9
+#define OPT_VERBOSE 10
+#define OPT_DEBUG 11
+#define OPT_BURST_MAX 12
+#define OPT_LED_PIN 13
+#define OPT_MAX_WIDE 14
+#define OPT_SETTLE_DELAY 15
+#define OPT_DIFF_SUPPORT 16
+#define OPT_IRQM 17
+#define OPT_PCI_FIX_UP 18
+#define OPT_BUS_CHECK 19
+#define OPT_OPTIMIZE 20
+#define OPT_RECOVERY 21
+#define OPT_SAFE_SETUP 22
+#define OPT_USE_NVRAM 23
+#define OPT_EXCLUDE 24
+#define OPT_HOST_ID 25
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+#define OPT_IARB 26
+#endif
+
+static char setup_token[] __initdata =
+ "tags:" "mpar:"
+ "spar:" "disc:"
+ "specf:" "ultra:"
+ "fsn:" "revprob:"
+ "sync:" "verb:"
+ "debug:" "burst:"
+ "led:" "wide:"
+ "settle:" "diff:"
+ "irqm:" "pcifix:"
+ "buschk:" "optim:"
+ "recovery:"
+ "safe:" "nvram:"
+ "excl:" "hostid:"
+#ifdef SCSI_NCR_IARB_SUPPORT
+ "iarb:"
+#endif
+ ; /* DONNOT REMOVE THIS ';' */
+
+ /*
+ ** NOTE: ARG_SEP is already defined identically above, next to the
+ ** OPT_* token definitions; the duplicate definition was removed.
+ */
+
+ /*
+ ** Scan the ':'-terminated token list in setup_token and return the
+ ** 1-based position of the token that prefixes p (its OPT_* value),
+ ** or 0 when p matches no known option token.
+ */
+ static int __init get_setup_token(char *p)
+ {
+ char *tok = setup_token;
+ int index = 0;
+
+ while (tok != NULL) {
+ char *colon = strchr(tok, ':');
+ if (colon == NULL)
+ break;
+ ++colon;
+ ++index;
+ if (strncmp(p, tok, colon - tok) == 0)
+ return index;
+ tok = colon;
+ }
+ return 0;
+ }
+
+
+ /*
+ ** Parse the "sym53c8xx=" boot command line / module option string.
+ ** Options look like "token:value" separated by ARG_SEP; values may
+ ** be 'y'/'n' booleans or numbers accepted by simple_strtoul.
+ ** Always returns 1 (consumed), per __setup() convention.
+ */
+ int __init sym53c8xx_setup(char *str)
+ {
+ #ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+ char *cur = str;
+ char *pc, *pv;
+ unsigned long val;
+ int i, c;
+ int xi = 0;
+
+ while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+ /*
+ ** Fix: 'pe' was only set by the simple_strtoul() branch
+ ** below, so the OPT_TAGS test 'if (pe && ...)' read an
+ ** uninitialized pointer for 'y'/'n' values (undefined
+ ** behavior). Initialize it to NULL.
+ */
+ char *pe = NULL;
+
+ val = 0;
+ pv = pc;
+ c = *++pv;
+
+ if (c == 'n')
+ val = 0;
+ else if (c == 'y')
+ val = 1;
+ else
+ val = (int) simple_strtoul(pv, &pe, 0);
+
+ switch (get_setup_token(cur)) {
+ case OPT_TAGS:
+ driver_setup.default_tags = val;
+ /* Optional "/cccc" tag control sub-string after the number. */
+ if (pe && *pe == '/') {
+ i = 0;
+ while (*pe && *pe != ARG_SEP &&
+ i < sizeof(driver_setup.tag_ctrl)-1) {
+ driver_setup.tag_ctrl[i++] = *pe++;
+ }
+ driver_setup.tag_ctrl[i] = '\0';
+ }
+ break;
+ case OPT_MASTER_PARITY:
+ driver_setup.master_parity = val;
+ break;
+ case OPT_SCSI_PARITY:
+ driver_setup.scsi_parity = val;
+ break;
+ case OPT_DISCONNECTION:
+ driver_setup.disconnection = val;
+ break;
+ case OPT_SPECIAL_FEATURES:
+ driver_setup.special_features = val;
+ break;
+ case OPT_ULTRA_SCSI:
+ driver_setup.ultra_scsi = val;
+ break;
+ case OPT_FORCE_SYNC_NEGO:
+ driver_setup.force_sync_nego = val;
+ break;
+ case OPT_REVERSE_PROBE:
+ driver_setup.reverse_probe = val;
+ break;
+ case OPT_DEFAULT_SYNC:
+ driver_setup.default_sync = val;
+ break;
+ case OPT_VERBOSE:
+ driver_setup.verbose = val;
+ break;
+ case OPT_DEBUG:
+ driver_setup.debug = val;
+ break;
+ case OPT_BURST_MAX:
+ driver_setup.burst_max = val;
+ break;
+ case OPT_LED_PIN:
+ driver_setup.led_pin = val;
+ break;
+ case OPT_MAX_WIDE:
+ driver_setup.max_wide = val? 1:0;
+ break;
+ case OPT_SETTLE_DELAY:
+ driver_setup.settle_delay = val;
+ break;
+ case OPT_DIFF_SUPPORT:
+ driver_setup.diff_support = val;
+ break;
+ case OPT_IRQM:
+ driver_setup.irqm = val;
+ break;
+ case OPT_PCI_FIX_UP:
+ driver_setup.pci_fix_up = val;
+ break;
+ case OPT_BUS_CHECK:
+ driver_setup.bus_check = val;
+ break;
+ case OPT_OPTIMIZE:
+ driver_setup.optimize = val;
+ break;
+ case OPT_RECOVERY:
+ driver_setup.recovery = val;
+ break;
+ case OPT_USE_NVRAM:
+ driver_setup.use_nvram = val;
+ break;
+ case OPT_SAFE_SETUP:
+ memcpy(&driver_setup, &driver_safe_setup,
+ sizeof(driver_setup));
+ break;
+ case OPT_EXCLUDE:
+ if (xi < SCSI_NCR_MAX_EXCLUDES)
+ driver_setup.excludes[xi++] = val;
+ break;
+ case OPT_HOST_ID:
+ driver_setup.host_id = val;
+ break;
+ #ifdef SCSI_NCR_IARB_SUPPORT
+ case OPT_IARB:
+ driver_setup.iarb = val;
+ break;
+ #endif
+ default:
+ printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
+ break;
+ }
+
+ if ((cur = strchr(cur, ARG_SEP)) != NULL)
+ ++cur;
+ }
+ #endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
+ return 1;
+ }
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,13)
+#ifndef MODULE
+__setup("sym53c8xx=", sym53c8xx_setup);
+#endif
+#endif
+
+static int
+sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, ncr_device *device);
+
+/*
+** Linux entry point for SYM53C8XX devices detection routine.
+**
+** Called by the middle-level scsi drivers at initialization time,
+** or at module installation.
+**
+** Read the PCI configuration and try to attach each
+** detected NCR board.
+**
+** If NVRAM is present, try to attach boards according to
+** the used defined boot order.
+**
+** Returns the number of boards successfully attached.
+*/
+
+ /*
+ ** Log the current driver_setup values in two compact printk lines.
+ */
+ static void __init ncr_print_driver_setup(void)
+ {
+ /* Parenthesize the argument and the whole expansion (macro hygiene). */
+ #define YesNo(y) ((y) ? 'y' : 'n')
+ printk (NAME53C8XX ": setup=disc:%c,specf:%d,ultra:%d,tags:%d,sync:%d,"
+ "burst:%d,wide:%c,diff:%d,revprob:%c,buschk:0x%x\n",
+ YesNo(driver_setup.disconnection),
+ driver_setup.special_features,
+ driver_setup.ultra_scsi,
+ driver_setup.default_tags,
+ driver_setup.default_sync,
+ driver_setup.burst_max,
+ YesNo(driver_setup.max_wide),
+ driver_setup.diff_support,
+ YesNo(driver_setup.reverse_probe),
+ driver_setup.bus_check);
+
+ printk (NAME53C8XX ": setup=mpar:%c,spar:%c,fsn=%c,verb:%d,debug:0x%x,"
+ "led:%c,settle:%d,irqm:0x%x,nvram:0x%x,pcifix:0x%x\n",
+ YesNo(driver_setup.master_parity),
+ YesNo(driver_setup.scsi_parity),
+ YesNo(driver_setup.force_sync_nego),
+ driver_setup.verbose,
+ driver_setup.debug,
+ YesNo(driver_setup.led_pin),
+ driver_setup.settle_delay,
+ driver_setup.irqm,
+ driver_setup.use_nvram,
+ driver_setup.pci_fix_up);
+ #undef YesNo
+ }
+
+/*===================================================================
+** SYM53C8XX devices description table and chip ids list.
+**===================================================================
+*/
+
+static ncr_chip ncr_chip_table[] __initdata = SCSI_NCR_CHIP_TABLE;
+static ushort ncr_chip_ids[] __initdata = SCSI_NCR_CHIP_IDS;
+
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+/*===================================================================
+** Detect all NCR PQS/PDS boards and keep track of their bus nr.
+**
+** The NCR PQS or PDS card is constructed as a DEC bridge
+** behind which sit a proprietary NCR memory controller and
+** four or two 53c875s as separate devices. In its usual mode
+** of operation, the 875s are slaved to the memory controller
+** for all transfers. We can tell if an 875 is part of a
+** PQS/PDS or not since if it is, it will be on the same bus
+** as the memory controller. To operate with the Linux
+** driver, the memory controller is disabled and the 875s
+** freed to function independently. The only wrinkle is that
+** the preset SCSI ID (which may be zero) must be read in from
+** a special configuration space register of the 875
+**===================================================================
+*/
+#define SCSI_NCR_MAX_PQS_BUS 16
+static int pqs_bus[SCSI_NCR_MAX_PQS_BUS] __initdata = { 0 };
+
+ /*
+ ** Find every NCR PQS/PDS memory controller (PCI 101a:0009),
+ ** reconfigure it so its slaved 875 chips operate independently
+ ** (see banner above), and record each controller's bus number in
+ ** pqs_bus[]; the list is terminated with -1.
+ */
+ static void __init ncr_detect_pqs_pds(void)
+ {
+ short index;
+ pcidev_t dev = PCIDEV_NULL;
+
+ for(index=0; index < SCSI_NCR_MAX_PQS_BUS; index++) {
+ u_char tmp;
+
+ dev = pci_find_device(0x101a, 0x0009, dev);
+ if (dev == PCIDEV_NULL) {
+ pqs_bus[index] = -1;
+ break;
+ }
+ printk(KERN_INFO NAME53C8XX ": NCR PQS/PDS memory controller detected on bus %d\n", PciBusNumber(dev));
+ pci_read_config_byte(dev, 0x44, &tmp);
+ /* bit 1: allow individual 875 configuration */
+ tmp |= 0x2;
+ pci_write_config_byte(dev, 0x44, tmp);
+ pci_read_config_byte(dev, 0x45, &tmp);
+ /* bit 2: drive individual 875 interrupts to the bus */
+ tmp |= 0x4;
+ pci_write_config_byte(dev, 0x45, tmp);
+
+ pqs_bus[index] = PciBusNumber(dev);
+ }
+ }
+#endif /* SCSI_NCR_PQS_PDS_SUPPORT */
+
+/*===================================================================
+** Detect all 53c8xx hosts and then attach them.
+**
+** If we are using NVRAM, once all hosts are detected, we need to
+** check any NVRAM for boot order in case detect and boot order
+** differ and attach them using the order in the NVRAM.
+**
+** If no NVRAM is found or data appears invalid attach boards in
+** the order they are detected.
+**===================================================================
+*/
+/* Driver detect entry point: probe every supported 53c8xx chip id,
+** de-duplicate devices reported twice, then attach in NVRAM boot
+** order (if a Symbios NVRAM was found) or in detection order.
+** Returns the number of hosts attached. */
+int __init sym53c8xx_detect(Scsi_Host_Template *tpnt)
+{
+	pcidev_t pcidev;
+	int i, j, chips, hosts, count;
+	int attach_count = 0;
+	ncr_device *devtbl, *devp;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	ncr_nvram nvram0, nvram, *nvp;
+#endif
+
+	/*
+	** PCI is required.
+	*/
+	if (!pci_present())
+		return 0;
+
+	/*
+	** Initialize driver general stuff.
+	*/
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,3,27)
+	tpnt->proc_dir = &proc_scsi_sym53c8xx;
+#else
+	tpnt->proc_name = NAME53C8XX;
+#endif
+	tpnt->proc_info = sym53c8xx_proc_info;
+#endif
+
+#if defined(SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT) && defined(MODULE)
+if (sym53c8xx)
+	sym53c8xx_setup(sym53c8xx);
+#endif
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+	ncr_debug = driver_setup.debug;
+#endif
+
+	if (initverbose >= 2)
+		ncr_print_driver_setup();
+
+	/*
+	** Allocate the device table since we donnot want to
+	** overflow the kernel stack.
+	** 1 x 4K PAGE is enough for more than 40 devices for i386.
+	*/
+	devtbl = m_calloc(PAGE_SIZE, "devtbl");
+	if (!devtbl)
+		return 0;
+
+	/*
+	** Detect all NCR PQS/PDS memory controllers.
+	*/
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+	ncr_detect_pqs_pds();
+#endif
+
+	/*
+	** Detect all 53c8xx hosts.
+	** Save the first Symbios NVRAM content if any
+	** for the boot order.
+	*/
+	chips = sizeof(ncr_chip_ids) / sizeof(ncr_chip_ids[0]);
+	hosts = PAGE_SIZE / sizeof(*devtbl);
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	/* nvp starts at nvram0; once a Symbios NVRAM is captured there,
+	** nvp is switched to the scratch buffer so nvram0 keeps the
+	** boot-order data. */
+	nvp = (driver_setup.use_nvram & 0x1) ? &nvram0 : 0;
+#endif
+	j = 0;
+	count = 0;
+	pcidev = PCIDEV_NULL;
+	while (1) {
+		char *msg = "";
+		if (count >= hosts)
+			break;
+		if (j >= chips)
+			break;
+		i = driver_setup.reverse_probe ? chips - 1 - j : j;
+		pcidev = pci_find_device(PCI_VENDOR_ID_NCR, ncr_chip_ids[i],
+					 pcidev);
+		if (pcidev == PCIDEV_NULL) {
+			++j;
+			continue;
+		}
+		/* Some HW as the HP LH4 may report twice PCI devices */
+		for (i = 0; i < count ; i++) {
+			if (devtbl[i].slot.bus == PciBusNumber(pcidev) &&
+			    devtbl[i].slot.device_fn == PciDeviceFn(pcidev))
+				break;
+		}
+		if (i != count)	/* Ignore this device if we already have it */
+			continue;
+		devp = &devtbl[count];
+		devp->host_id = driver_setup.host_id;
+		devp->attach_done = 0;
+		if (sym53c8xx_pci_init(tpnt, pcidev, devp)) {
+			continue;
+		}
+		++count;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+		if (nvp) {
+			ncr_get_nvram(devp, nvp);
+			switch(nvp->type) {
+			case SCSI_NCR_SYMBIOS_NVRAM:
+				/*
+				 *  Switch to the other nvram buffer, so that
+				 *  nvram0 will contain the first Symbios
+				 *  format NVRAM content with boot order.
+				 */
+				nvp = &nvram;
+				msg = "with Symbios NVRAM";
+				break;
+			case SCSI_NCR_TEKRAM_NVRAM:
+				msg = "with Tekram NVRAM";
+				break;
+			}
+		}
+#endif
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+		if (devp->pqs_pds)
+			msg = "(NCR PQS/PDS)";
+#endif
+		printk(KERN_INFO NAME53C8XX ": 53c%s detected %s\n",
+		       devp->chip.name, msg);
+	}
+
+	/*
+	** If we have found a SYMBIOS NVRAM, use first the NVRAM boot
+	** sequence as device boot order.
+	** check devices in the boot record against devices detected.
+	** attach devices if we find a match. boot table records that
+	** do not match any detected devices will be ignored.
+	** devices that do not match any boot table will not be attached
+	** here but will attempt to be attached during the device table
+	** rescan.
+	*/
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	if (!nvp || nvram0.type != SCSI_NCR_SYMBIOS_NVRAM)
+		goto next;
+	for (i = 0; i < 4; i++) {
+		Symbios_host *h = &nvram0.data.Symbios.host[i];
+		for (j = 0 ; j < count ; j++) {
+			devp = &devtbl[j];
+			if (h->device_fn != devp->slot.device_fn ||
+			    h->bus_nr	 != devp->slot.bus	 ||
+			    h->device_id != devp->chip.device_id)
+				continue;
+			if (devp->attach_done)
+				continue;
+			if (h->flags & SYMBIOS_INIT_SCAN_AT_BOOT) {
+				ncr_get_nvram(devp, nvp);
+				if (!ncr_attach (tpnt, attach_count, devp))
+					attach_count++;
+			}
+			else if (!(driver_setup.use_nvram & 0x80))
+				printk(KERN_INFO NAME53C8XX
+				       ": 53c%s state OFF thus not attached\n",
+				       devp->chip.name);
+			else
+				continue;
+
+			devp->attach_done = 1;
+			break;
+		}
+	}
+next:
+#endif
+
+	/*
+	** Rescan device list to make sure all boards attached.
+	** Devices without boot records will not be attached yet
+	** so try to attach them here.
+	*/
+	for (i= 0; i < count; i++) {
+		devp = &devtbl[i];
+		if (!devp->attach_done) {
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+			/* Re-read this device's NVRAM into the scratch
+			** buffer (nvp may be 0 if NVRAM use is disabled). */
+			ncr_get_nvram(devp, nvp);
+#endif
+			if (!ncr_attach (tpnt, attach_count, devp))
+				attach_count++;
+		}
+	}
+
+	m_free(devtbl, PAGE_SIZE, "devtbl");
+
+	return attach_count;
+}
+
+/*===================================================================
+** Read and check the PCI configuration for any detected NCR
+** boards and save data for attaching after all boards have
+** been detected.
+**===================================================================
+*/
+/* Read and sanity-check the PCI configuration of one detected chip,
+** apply platform and chip-errata fix-ups, and fill in the ncr_device
+** slot information needed later by ncr_attach().
+** Returns 0 on success, -1 if the device must be ignored. */
+static int __init
+sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, ncr_device *device)
+{
+	u_short vendor_id, device_id, command, status_reg;
+	u_char cache_line_size, latency_timer;
+	u_char suggested_cache_line_size = 0;
+	u_char pci_fix_up = driver_setup.pci_fix_up;
+	u_char revision;
+	u_int irq;
+	u_long base, base_2, io_port;
+	int i;
+	ncr_chip *chip;
+
+	printk(KERN_INFO NAME53C8XX ": at PCI bus %d, device %d, function %d\n",
+		PciBusNumber(pdev),
+		(int) (PciDeviceFn(pdev) & 0xf8) >> 3,
+		(int) (PciDeviceFn(pdev) & 7));
+
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+	if (!pci_dma_supported(pdev, (dma_addr_t) (0xffffffffUL))) {
+		printk(KERN_WARNING NAME53C8XX
+		       "32 BIT PCI BUS DMA ADDRESSING NOT SUPPORTED\n");
+		return -1;
+	}
+#endif
+
+	/*
+	** Read info from the PCI config space.
+	** pci_read_config_xxx() functions are assumed to be used for
+	** successfully detected PCI devices.
+	*/
+	vendor_id = PciVendorId(pdev);
+	device_id = PciDeviceId(pdev);
+	irq = PciIrqLine(pdev);
+	i = 0;
+	i = pci_get_base_address(pdev, i, &io_port);
+	i = pci_get_base_address(pdev, i, &base);
+	(void) pci_get_base_address(pdev, i, &base_2);
+
+	pci_read_config_word(pdev, PCI_COMMAND, &command);
+	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
+	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
+	pci_read_config_word(pdev, PCI_STATUS, &status_reg);
+
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+	/*
+	** Match the BUS number for PQS/PDS devices.
+	** Read the SCSI ID from a special register mapped
+	** into the configuration space of the individual
+	** 875s. This register is set up by the PQS bios
+	*/
+	for(i = 0; i < SCSI_NCR_MAX_PQS_BUS && pqs_bus[i] != -1; i++) {
+		u_char tmp;
+		if (pqs_bus[i] == PciBusNumber(pdev)) {
+			pci_read_config_byte(pdev, 0x84, &tmp);
+			device->pqs_pds = 1;
+			device->host_id = tmp;
+			break;
+		}
+	}
+#endif /* SCSI_NCR_PQS_PDS_SUPPORT */
+
+	/*
+	** If user excludes this chip, donnot initialize it.
+	*/
+	for (i = 0 ; i < SCSI_NCR_MAX_EXCLUDES ; i++) {
+		if (driver_setup.excludes[i] ==
+				(io_port & PCI_BASE_ADDRESS_IO_MASK))
+			return -1;
+	}
+	/*
+	** Check if the chip is supported
+	*/
+	chip = 0;
+	for (i = 0; i < sizeof(ncr_chip_table)/sizeof(ncr_chip_table[0]); i++) {
+		if (device_id != ncr_chip_table[i].device_id)
+			continue;
+		if (revision > ncr_chip_table[i].revision_id)
+			continue;
+		if (!(ncr_chip_table[i].features & FE_LDSTR))
+			break;
+		chip = &device->chip;
+		memcpy(chip, &ncr_chip_table[i], sizeof(*chip));
+		chip->revision_id = revision;
+		break;
+	}
+
+	/*
+	** Ignore Symbios chips controlled by SISL RAID controller.
+	** This controller sets value 0x52414944 at RAM end - 16.
+	*/
+#if defined(__i386__) && !defined(SCSI_NCR_PCI_MEM_NOT_SUPPORTED)
+	if (chip && (base_2 & PCI_BASE_ADDRESS_MEM_MASK)) {
+		unsigned int ram_size, ram_val;
+		u_long ram_ptr;
+
+		if (chip->features & FE_RAM8K)
+			ram_size = 8192;
+		else
+			ram_size = 4096;
+
+		ram_ptr = remap_pci_mem(base_2 & PCI_BASE_ADDRESS_MEM_MASK,
+					ram_size);
+		if (ram_ptr) {
+			ram_val = readl_raw(ram_ptr + ram_size - 16);
+			unmap_pci_mem(ram_ptr, ram_size);
+			if (ram_val == 0x52414944) {
+				printk(NAME53C8XX": not initializing, "
+				       "driven by SISL RAID controller.\n");
+				return -1;
+			}
+		}
+	}
+#endif /* i386 and PCI MEMORY accessible */
+
+	if (!chip) {
+		printk(NAME53C8XX ": not initializing, device not supported\n");
+		return -1;
+	}
+
+#ifdef __powerpc__
+	/*
+	** Fix-up for power/pc.
+	** Should not be performed by the driver.
+	*/
+	if ((command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+		    != (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+		printk(NAME53C8XX ": setting%s%s...\n",
+		(command & PCI_COMMAND_IO)     ? "" :  " PCI_COMMAND_IO",
+		(command & PCI_COMMAND_MEMORY) ? "" :  " PCI_COMMAND_MEMORY");
+		command |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+		pci_write_config_word(pdev, PCI_COMMAND, command);
+	}
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,2,0)
+	if ( is_prep ) {
+		if (io_port >= 0x10000000) {
+			printk(NAME53C8XX ": reallocating io_port (Wacky IBM)");
+			io_port = (io_port & 0x00FFFFFF) | 0x01000000;
+			pci_write_config_dword(pdev,
+					       PCI_BASE_ADDRESS_0, io_port);
+		}
+		if (base >= 0x10000000) {
+			printk(NAME53C8XX ": reallocating base (Wacky IBM)");
+			base = (base & 0x00FFFFFF) | 0x01000000;
+			pci_write_config_dword(pdev,
+					       PCI_BASE_ADDRESS_1, base);
+		}
+		if (base_2 >= 0x10000000) {
+			printk(NAME53C8XX ": reallocating base2 (Wacky IBM)");
+			base_2 = (base_2 & 0x00FFFFFF) | 0x01000000;
+			pci_write_config_dword(pdev,
+					       PCI_BASE_ADDRESS_2, base_2);
+		}
+	}
+#endif
+#endif	/* __powerpc__ */
+
+#if defined(__sparc__) && (LINUX_VERSION_CODE < LinuxVersionCode(2,3,0))
+	/*
+	** Fix-ups for sparc.
+	**
+	** I wrote: Should not be performed by the driver,
+	** Guy wrote: but how can OBP know each and every PCI card,
+	** if they don't use Fcode?
+	** I replied: no need to know each and every PCI card, just
+	** be skilled enough to understand the PCI specs.
+	*/
+
+	/*
+	** PCI configuration is based on configuration registers being
+	** coherent with hardware and software resource identifications.
+	** This is fairly simple, but seems still too complex for Sparc.
+	*/
+	base = __pa(base);
+	base_2 = __pa(base_2);
+
+	if (!cache_line_size)
+		suggested_cache_line_size = 16;
+
+	driver_setup.pci_fix_up |= 0x7;
+
+#endif	/* __sparc__ */
+
+#if defined(__i386__) && !defined(MODULE)
+	if (!cache_line_size) {
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,75)
+		extern char x86;
+		switch(x86) {
+#else
+		switch(boot_cpu_data.x86) {
+#endif
+		case 4:	suggested_cache_line_size = 4; break;
+		case 6:
+		case 5:	suggested_cache_line_size = 8; break;
+		}
+	}
+#endif	/* __i386__ */
+
+	/*
+	** Check availability of IO space, memory space.
+	** Enable master capability if not yet.
+	**
+	** We shouldn't have to care about the IO region when
+	** we are using MMIO. But calling check_region() from
+	** both the ncr53c8xx and the sym53c8xx drivers prevents
+	** from attaching devices from the both drivers.
+	** If you have a better idea, let me know.
+	*/
+/* #ifdef SCSI_NCR_IOMAPPED */
+#if 1
+	if (!(command & PCI_COMMAND_IO)) {
+		printk(NAME53C8XX ": I/O base address (0x%lx) disabled.\n",
+			(long) io_port);
+		io_port = 0;
+	}
+#endif
+	if (!(command & PCI_COMMAND_MEMORY)) {
+		printk(NAME53C8XX ": PCI_COMMAND_MEMORY not set.\n");
+		base = 0;
+		base_2 = 0;
+	}
+	io_port &= PCI_BASE_ADDRESS_IO_MASK;
+	base &= PCI_BASE_ADDRESS_MEM_MASK;
+	base_2 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+/* #ifdef SCSI_NCR_IOMAPPED */
+#if 1
+	if (io_port && check_region (io_port, 128)) {
+		printk(NAME53C8XX ": IO region 0x%lx[0..127] is in use\n",
+			(long) io_port);
+		io_port = 0;
+	}
+	if (!io_port)
+		return -1;
+#endif
+#ifndef SCSI_NCR_IOMAPPED
+	if (!base) {
+		printk(NAME53C8XX ": MMIO base address disabled.\n");
+		return -1;
+	}
+#endif
+
+	/*
+	** Set MASTER capable and PARITY bit, if not yet.
+	*/
+	if ((command & (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY))
+		    != (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY)) {
+		printk(NAME53C8XX ": setting%s%s...(fix-up)\n",
+		(command & PCI_COMMAND_MASTER) ? "" :  " PCI_COMMAND_MASTER",
+		(command & PCI_COMMAND_PARITY) ? "" :  " PCI_COMMAND_PARITY");
+		command |= (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY);
+		pci_write_config_word(pdev, PCI_COMMAND, command);
+	}
+
+	/*
+	** Fix some features according to driver setup.
+	*/
+	if (!(driver_setup.special_features & 1))
+		chip->features &= ~FE_SPECIAL_SET;
+	else {
+		if (driver_setup.special_features & 2)
+			chip->features &= ~FE_WRIE;
+		if (driver_setup.special_features & 4)
+			chip->features &= ~FE_NOPM;
+	}
+
+	/*
+	** Work around for errant bit in 895A. The 66Mhz
+	** capable bit is set erroneously. Clear this bit.
+	** (Item 1 DEL 533)
+	**
+	** Make sure Config space and Features agree.
+	**
+	** Recall: writes are not normal to status register -
+	** write a 1 to clear and a 0 to leave unchanged.
+	** Can only reset bits.
+	*/
+	if (chip->features & FE_66MHZ) {
+		if (!(status_reg & PCI_STATUS_66MHZ))
+			chip->features &= ~FE_66MHZ;
+	}
+	else {
+		if (status_reg & PCI_STATUS_66MHZ) {
+			status_reg = PCI_STATUS_66MHZ;
+			pci_write_config_word(pdev, PCI_STATUS, status_reg);
+			pci_read_config_word(pdev, PCI_STATUS, &status_reg);
+		}
+	}
+
+	if (driver_setup.ultra_scsi < 3 && (chip->features & FE_ULTRA3)) {
+		chip->features |=  FE_ULTRA2;
+		chip->features &= ~FE_ULTRA3;
+	}
+	if (driver_setup.ultra_scsi < 2 && (chip->features & FE_ULTRA2)) {
+		chip->features |=  FE_ULTRA;
+		chip->features &= ~FE_ULTRA2;
+	}
+	if (driver_setup.ultra_scsi < 1)
+		chip->features &= ~FE_ULTRA;
+
+	if (!driver_setup.max_wide)
+		chip->features &= ~FE_WIDE;
+
+	/*
+	 * C1010 Ultra3 support requires 16 bit data transfers.
+	 * Downgrade to Ultra2 when wide transfers are disabled.
+	 * (Fix: this previously read 'features |= ~FE_ULTRA3',
+	 * which set nearly every feature bit instead of clearing
+	 * FE_ULTRA3 as the parallel ultra_scsi code above does.)
+	 */
+	if (!driver_setup.max_wide && (chip->features & FE_ULTRA3)) {
+		chip->features |=  FE_ULTRA2;
+		chip->features &= ~FE_ULTRA3;
+	}
+
+	/*
+	** Some features are required to be enabled in order to
+	** work around some chip problems. :) ;)
+	** (ITEM 12 of a DEL about the 896 I haven't yet).
+	** We must ensure the chip will use WRITE AND INVALIDATE.
+	** The revision number limit is for now arbitrary.
+	*/
+	if (device_id == PCI_DEVICE_ID_NCR_53C896 && revision <= 0x10) {
+		chip->features	|= (FE_WRIE | FE_CLSE);
+		pci_fix_up	|=  3;	/* Force appropriate PCI fix-up */
+	}
+
+#ifdef	SCSI_NCR_PCI_FIX_UP_SUPPORT
+	/*
+	** Try to fix up PCI config according to wished features.
+	*/
+	if ((pci_fix_up & 1) && (chip->features & FE_CLSE) &&
+	    !cache_line_size && suggested_cache_line_size) {
+		cache_line_size = suggested_cache_line_size;
+		pci_write_config_byte(pdev,
+				      PCI_CACHE_LINE_SIZE, cache_line_size);
+		printk(NAME53C8XX ": PCI_CACHE_LINE_SIZE set to %d (fix-up).\n",
+			cache_line_size);
+	}
+
+	if ((pci_fix_up & 2) && cache_line_size &&
+	    (chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+		printk(NAME53C8XX": setting PCI_COMMAND_INVALIDATE (fix-up)\n");
+		command |= PCI_COMMAND_INVALIDATE;
+		pci_write_config_word(pdev, PCI_COMMAND, command);
+	}
+
+	/*
+	** Tune PCI LATENCY TIMER according to burst max length transfer.
+	** (latency timer >= burst length + 6, we add 10 to be quite sure)
+	*/
+
+	if (chip->burst_max && (latency_timer == 0 || (pci_fix_up & 4))) {
+		uchar lt = (1 << chip->burst_max) + 6 + 10;
+		if (latency_timer < lt) {
+			printk(NAME53C8XX
+			       ": changing PCI_LATENCY_TIMER from %d to %d.\n",
+			       (int) latency_timer, (int) lt);
+			latency_timer = lt;
+			pci_write_config_byte(pdev,
+					      PCI_LATENCY_TIMER, latency_timer);
+		}
+	}
+
+#endif	/* SCSI_NCR_PCI_FIX_UP_SUPPORT */
+
+	/*
+	** Initialise ncr_device structure with items required by ncr_attach.
+	*/
+	device->pdev		= pdev;
+	device->slot.bus	= PciBusNumber(pdev);
+	device->slot.device_fn	= PciDeviceFn(pdev);
+	device->slot.base	= base;
+	device->slot.base_2	= base_2;
+	device->slot.io_port	= io_port;
+	device->slot.irq	= irq;
+	device->attach_done	= 0;
+
+	return 0;
+}
+
+
+/*===================================================================
+** Detect and try to read SYMBIOS and TEKRAM NVRAM.
+**
+** Data can be used to order booting of boards.
+**
+** Data is saved in ncr_device structure if NVRAM found. This
+** is then used to find drive boot order for ncr_attach().
+**
+** NVRAM data is passed to Scsi_Host_Template later during
+** ncr_attach() for any device set up.
+**===================================================================
+*/
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+/* Probe this device's NVRAM into *nvp (Symbios format first, then
+** Tekram). On success devp->nvram points at nvp and nvp->type is set;
+** otherwise both are cleared. A null nvp disables NVRAM reading. */
+static void __init ncr_get_nvram(ncr_device *devp, ncr_nvram *nvp)
+{
+	devp->nvram = nvp;
+	if (!nvp)
+		return;
+	/*
+	** Get access to chip IO registers
+	*/
+#ifdef SCSI_NCR_IOMAPPED
+	request_region(devp->slot.io_port, 128, NAME53C8XX);
+	devp->slot.base_io = devp->slot.io_port;
+#else
+	devp->slot.reg = (struct ncr_reg *) remap_pci_mem(devp->slot.base, 128);
+	if (!devp->slot.reg)
+		return;
+#endif
+
+	/*
+	** Try to read SYMBIOS nvram.
+	** Try to read TEKRAM nvram if Symbios nvram not found.
+	*/
+	if	(!sym_read_Symbios_nvram(&devp->slot, &nvp->data.Symbios))
+		nvp->type = SCSI_NCR_SYMBIOS_NVRAM;
+	else if	(!sym_read_Tekram_nvram(&devp->slot, devp->chip.device_id,
+					&nvp->data.Tekram))
+		nvp->type = SCSI_NCR_TEKRAM_NVRAM;
+	else {
+		nvp->type = 0;
+		devp->nvram = 0;
+	}
+
+	/*
+	** Release access to chip IO registers
+	*/
+#ifdef SCSI_NCR_IOMAPPED
+	release_region(devp->slot.base_io, 128);
+#else
+	unmap_pci_mem((u_long) devp->slot.reg, 128ul);
+#endif
+
+}
+#endif	/* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Linux select queue depths function
+*/
+
+#define DEF_DEPTH	(driver_setup.default_tags)
+#define ALL_TARGETS	-2
+#define NO_TARGET	-1
+#define ALL_LUNS	-2
+#define NO_LUN		-1
+
+/* Parse the boot-time "tag_ctrl" string (e.g. "/t2q16-t3u0q4") and
+** return the queue depth configured for (host np->unit, target, lun),
+** or DEF_DEPTH when no matching 'q' directive is found.
+** '/' advances to the next host, 't'/'u' select target/lun,
+** 'q' sets the depth, '-' resets target/lun to "all". */
+static int device_queue_depth(ncb_p np, int target, int lun)
+{
+	int c, h, t, u, v;
+	char *p = driver_setup.tag_ctrl;
+	char *ep;
+
+	h = -1;
+	t = NO_TARGET;
+	u = NO_LUN;
+	while ((c = *p++) != 0) {
+		v = simple_strtoul(p, &ep, 0);
+		switch(c) {
+		case '/':
+			++h;
+			t = ALL_TARGETS;
+			u = ALL_LUNS;
+			break;
+		case 't':
+			if (t != target)
+				t = (target == v) ? v : NO_TARGET;
+			u = ALL_LUNS;
+			break;
+		case 'u':
+			if (u != lun)
+				u = (lun == v) ? v : NO_LUN;
+			break;
+		case 'q':
+			if (h == np->unit &&
+				(t == ALL_TARGETS || t == target) &&
+				(u == ALL_LUNS    || u == lun))
+				return v;
+			break;
+		case '-':
+			t = ALL_TARGETS;
+			u = ALL_LUNS;
+			break;
+		default:
+			break;
+		}
+		p = ep;
+	}
+	return DEF_DEPTH;
+}
+
+/* Mid-layer callback: set the queue depth of every scsi_device on
+** this host from the tag_ctrl setup, clamped to [2, MAX_TAGS]
+** (1 effective for non-tagged devices), and mirror the value into
+** the per-lun control block so user reporting stays truthful. */
+static void sym53c8xx_select_queue_depths(struct Scsi_Host *host, struct scsi_device *devlist)
+{
+	struct scsi_device *device;
+
+	for (device = devlist; device; device = device->next) {
+		ncb_p np;
+		tcb_p tp;
+		lcb_p lp;
+		int numtags;
+
+		if (device->host != host)
+			continue;
+
+		np = ((struct host_data *) host->hostdata)->ncb;
+		tp = &np->target[device->id];
+		lp = ncr_lp(np, tp, device->lun);
+
+		/*
+		** Select queue depth from driver setup.
+		** Donnot use more than configured by user.
+		** Use at least 2.
+		** Donnot use more than our maximum.
+		*/
+		numtags = device_queue_depth(np, device->id, device->lun);
+		if (numtags > tp->usrtags)
+			numtags = tp->usrtags;
+		if (!device->tagged_supported)
+			numtags = 1;
+		device->queue_depth = numtags;
+		if (device->queue_depth < 2)
+			device->queue_depth = 2;
+		if (device->queue_depth > MAX_TAGS)
+			device->queue_depth = MAX_TAGS;
+
+		/*
+		** Since the queue depth is not tunable under Linux,
+		** we need to know this value in order not to
+		** announce stupid things to user.
+		*/
+		if (lp) {
+			lp->numtags = lp->maxtags = numtags;
+			lp->scdev_depth = device->queue_depth;
+		}
+		ncr_setup_tags (np, device->id, device->lun);
+
+#ifdef DEBUG_SYM53C8XX
+printk("sym53c8xx_select_queue_depth: host=%d, id=%d, lun=%d, depth=%d\n",
+	np->unit, device->id, device->lun, device->queue_depth);
+#endif
+	}
+}
+
+/*
+** Linux entry point for info() function
+*/
+/* Mid-layer info() entry point: returns the static driver name string. */
+const char *sym53c8xx_info (struct Scsi_Host *host)
+{
+	return SCSI_NCR_DRIVER_NAME;
+}
+
+/*
+** Linux entry point of queuecommand() function
+*/
+
+/* Mid-layer queuecommand() entry point. Queues cmd to the chip under
+** the controller lock; on failure sets the SCSI result, unmaps any
+** DMA and completes the command immediately via done(). Returns the
+** ncr_queue_command() status (DID_OK on success). */
+int sym53c8xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
+{
+     ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb;
+     unsigned long flags;
+     int sts;
+
+#ifdef DEBUG_SYM53C8XX
+printk("sym53c8xx_queue_command\n");
+#endif
+
+     cmd->scsi_done     = done;
+     cmd->host_scribble = NULL;
+     cmd->SCp.ptr       = NULL;
+     cmd->SCp.buffer    = NULL;
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+     cmd->__data_mapped = 0;
+     cmd->__data_mapping = 0;
+#endif
+
+     NCR_LOCK_NCB(np, flags);
+
+     if ((sts = ncr_queue_command(np, cmd)) != DID_OK) {
+	  SetScsiResult(cmd, sts, 0);
+#ifdef DEBUG_SYM53C8XX
+printk("sym53c8xx : command not queued - result=%d\n", sts);
+#endif
+     }
+#ifdef DEBUG_SYM53C8XX
+     else
+printk("sym53c8xx : command successfully queued\n");
+#endif
+
+     NCR_UNLOCK_NCB(np, flags);
+
+     if (sts != DID_OK) {
+	  unmap_scsi_data(np, cmd);
+	  done(cmd);
+     }
+
+     return sts;
+}
+
+/*
+** Linux entry point of the interrupt handler.
+** Since linux versions > 1.3.70, we trust the kernel for
+** passing the internal host descriptor as 'dev_id'.
+** Otherwise, we scan the host list and call the interrupt
+** routine for each host that uses this IRQ.
+*/
+
+/* Interrupt handler: dev_id is the controller block. Services the
+** chip exception under the NCB lock, detaches the completed-command
+** list, then flushes completions under the separate done lock so
+** scsi_done callbacks never run with the NCB lock held. */
+static void sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs)
+{
+     unsigned long flags;
+     ncb_p np = (ncb_p) dev_id;
+     Scsi_Cmnd *done_list;
+
+#ifdef DEBUG_SYM53C8XX
+     printk("sym53c8xx : interrupt received\n");
+#endif
+
+     if (DEBUG_FLAGS & DEBUG_TINY) printk ("[");
+
+     NCR_LOCK_NCB(np, flags);
+     ncr_exception(np);
+     done_list     = np->done_list;
+     np->done_list = 0;
+     NCR_UNLOCK_NCB(np, flags);
+
+     if (DEBUG_FLAGS & DEBUG_TINY) printk ("]\n");
+
+     if (done_list) {
+	  NCR_LOCK_SCSI_DONE(np, flags);
+	  ncr_flush_done_cmds(done_list);
+	  NCR_UNLOCK_SCSI_DONE(np, flags);
+     }
+}
+
+/*
+** Linux entry point of the timer handler
+*/
+
+/* Driver timer handler: npref is the controller block cast to the
+** timer argument. Runs the periodic ncr_timeout() work under the NCB
+** lock and flushes any commands it completed, mirroring the locking
+** pattern of the interrupt handler. */
+static void sym53c8xx_timeout(unsigned long npref)
+{
+     ncb_p np = (ncb_p) npref;
+     unsigned long flags;
+     Scsi_Cmnd *done_list;
+
+     NCR_LOCK_NCB(np, flags);
+     ncr_timeout((ncb_p) np);
+     done_list     = np->done_list;
+     np->done_list = 0;
+     NCR_UNLOCK_NCB(np, flags);
+
+     if (done_list) {
+	  NCR_LOCK_SCSI_DONE(np, flags);
+	  ncr_flush_done_cmds(done_list);
+	  NCR_UNLOCK_SCSI_DONE(np, flags);
+     }
+}
+
+/*
+** Linux entry point of reset() function
+*/
+
+/* Mid-layer reset() entry point (old error-handling API). Performs a
+** bus reset via ncr_reset_bus() under the NCB lock and flushes any
+** commands completed as a side effect. The signature depends on
+** whether the kernel passes reset_flags. */
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+int sym53c8xx_reset(Scsi_Cmnd *cmd, unsigned int reset_flags)
+#else
+int sym53c8xx_reset(Scsi_Cmnd *cmd)
+#endif
+{
+	ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb;
+	int sts;
+	unsigned long flags;
+	Scsi_Cmnd *done_list;
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+	printk("sym53c8xx_reset: pid=%lu reset_flags=%x serial_number=%ld serial_number_at_timeout=%ld\n",
+		cmd->pid, reset_flags, cmd->serial_number, cmd->serial_number_at_timeout);
+#else
+	printk("sym53c8xx_reset: command pid %lu\n", cmd->pid);
+#endif
+
+	NCR_LOCK_NCB(np, flags);
+
+	/*
+	 * We have to just ignore reset requests in some situations.
+	 */
+#if defined SCSI_RESET_NOT_RUNNING
+	/* Command already completed between timeout and this call. */
+	if (cmd->serial_number != cmd->serial_number_at_timeout) {
+		sts = SCSI_RESET_NOT_RUNNING;
+		goto out;
+	}
+#endif
+	/*
+	 * If the mid-level driver told us reset is synchronous, it seems
+	 * that we must call the done() callback for the involved command,
+	 * even if this command was not queued to the low-level driver,
+	 * before returning SCSI_RESET_SUCCESS.
+	 */
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+	sts = ncr_reset_bus(np, cmd,
+	(reset_flags & (SCSI_RESET_SYNCHRONOUS | SCSI_RESET_ASYNCHRONOUS)) == SCSI_RESET_SYNCHRONOUS);
+#else
+	sts = ncr_reset_bus(np, cmd, 0);
+#endif
+
+	/*
+	 * Since we always reset the controller, when we return success,
+	 * we add this information to the return code.
+	 */
+#if defined SCSI_RESET_HOST_RESET
+	if (sts == SCSI_RESET_SUCCESS)
+		sts |= SCSI_RESET_HOST_RESET;
+#endif
+
+out:
+	done_list     = np->done_list;
+	np->done_list = 0;
+	NCR_UNLOCK_NCB(np, flags);
+
+	ncr_flush_done_cmds(done_list);
+
+	return sts;
+}
+
+/*
+** Linux entry point of abort() function
+*/
+
+/* Mid-layer abort() entry point (old error-handling API). Asks the
+** chip layer to abort cmd under the NCB lock; ignores stale requests
+** whose command already completed, and flushes completions afterwards. */
+int sym53c8xx_abort(Scsi_Cmnd *cmd)
+{
+	ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb;
+	int sts;
+	unsigned long flags;
+	Scsi_Cmnd *done_list;
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+	printk("sym53c8xx_abort: pid=%lu serial_number=%ld serial_number_at_timeout=%ld\n",
+		cmd->pid, cmd->serial_number, cmd->serial_number_at_timeout);
+#else
+	printk("sym53c8xx_abort: command pid %lu\n", cmd->pid);
+#endif
+
+	NCR_LOCK_NCB(np, flags);
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+	/*
+	 * We have to just ignore abort requests in some situations.
+	 */
+	if (cmd->serial_number != cmd->serial_number_at_timeout) {
+		sts = SCSI_ABORT_NOT_RUNNING;
+		goto out;
+	}
+#endif
+
+	sts = ncr_abort_command(np, cmd);
+out:
+	done_list     = np->done_list;
+	np->done_list = 0;
+	NCR_UNLOCK_NCB(np, flags);
+
+	ncr_flush_done_cmds(done_list);
+
+	return sts;
+}
+
+
+#ifdef MODULE
+/* Module unload hook: detach the controller block from the host.
+** Always returns 1 per the old release() convention. */
+int sym53c8xx_release(struct Scsi_Host *host)
+{
+#ifdef DEBUG_SYM53C8XX
+printk("sym53c8xx : release\n");
+#endif
+     ncr_detach(((struct host_data *) host->hostdata)->ncb);
+
+     return 1;
+}
+#endif
+
+
+/*
+** Scsi command waiting list management.
+**
+** It may happen that we cannot insert a scsi command into the start queue,
+** in the following circumstances.
+** Too few preallocated ccb(s),
+** maxtags < cmd_per_lun of the Linux host control block,
+** etc...
+** Such scsi commands are inserted into a waiting list.
+** When a scsi command complete, we try to requeue the commands of the
+** waiting list.
+*/
+
+/* The singly-linked waiting list is threaded through the command's
+** host_scribble field, aliased here for readability. */
+#define next_wcmd host_scribble
+
+/* Append cmd to the tail of np's waiting list. */
+static void insert_into_waiting_list(ncb_p np, Scsi_Cmnd *cmd)
+{
+	Scsi_Cmnd *wcmd;
+
+#ifdef DEBUG_WAITING_LIST
+	printk("%s: cmd %lx inserted into waiting list\n", ncr_name(np), (u_long) cmd);
+#endif
+	cmd->next_wcmd = 0;
+	if (!(wcmd = np->waiting_list)) np->waiting_list = cmd;
+	else {
+		while ((wcmd->next_wcmd) != 0)
+			wcmd = (Scsi_Cmnd *) wcmd->next_wcmd;
+		wcmd->next_wcmd = (char *) cmd;
+	}
+}
+
+/* Search np's waiting list for cmd; return it if present (unlinking
+** it when to_remove is non-zero), else return 0. */
+static Scsi_Cmnd *retrieve_from_waiting_list(int to_remove, ncb_p np, Scsi_Cmnd *cmd)
+{
+	Scsi_Cmnd **pcmd = &np->waiting_list;
+
+	while (*pcmd) {
+		if (cmd == *pcmd) {
+			if (to_remove) {
+				*pcmd = (Scsi_Cmnd *) cmd->next_wcmd;
+				cmd->next_wcmd = 0;
+			}
+#ifdef DEBUG_WAITING_LIST
+	printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
+#endif
+			return cmd;
+		}
+		pcmd = (Scsi_Cmnd **) &(*pcmd)->next_wcmd;
+	}
+	return 0;
+}
+
+/* Drain the waiting list: while sts stays DID_OK, requeue each
+** command; once a requeue fails (or sts was a failure on entry),
+** complete the remaining commands with that status. */
+static void process_waiting_list(ncb_p np, int sts)
+{
+	Scsi_Cmnd *waiting_list, *wcmd;
+
+	waiting_list = np->waiting_list;
+	np->waiting_list = 0;
+
+#ifdef DEBUG_WAITING_LIST
+	if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
+#endif
+	while ((wcmd = waiting_list) != 0) {
+		waiting_list = (Scsi_Cmnd *) wcmd->next_wcmd;
+		wcmd->next_wcmd = 0;
+		if (sts == DID_OK) {
+#ifdef DEBUG_WAITING_LIST
+	printk("%s: cmd %lx trying to requeue\n", ncr_name(np), (u_long) wcmd);
+#endif
+			sts = ncr_queue_command(np, wcmd);
+		}
+		if (sts != DID_OK) {
+#ifdef DEBUG_WAITING_LIST
+	printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts);
+#endif
+			SetScsiResult(wcmd, sts, 0);
+			ncr_queue_done_cmd(np, wcmd);
+		}
+	}
+}
+
+#undef next_wcmd
+
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+
+/*=========================================================================
+** Proc file system stuff
+**
+** A read operation returns adapter information.
+** A write operation is a control command.
+** The string is parsed in the driver code and the command is passed
+** to the ncr_usercmd() function.
+**=========================================================================
+*/
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+
+#define is_digit(c)	((c) >= '0' && (c) <= '9')
+#define digit_to_bin(c)	((c) - '0')
+#define is_space(c)	((c) == ' ' || (c) == '\t')
+
+/* Return the number of leading space/tab characters in ptr[0..len). */
+static int skip_spaces(char *ptr, int len)
+{
+	int cnt, c;
+
+	for (cnt = len; cnt > 0 && (c = *ptr++) && is_space(c); cnt--);
+
+	return (len - cnt);
+}
+
+/* Parse a leading unsigned decimal from ptr[0..len) into *pv (if pv
+** is non-null); return the number of digit characters consumed. */
+static int get_int_arg(char *ptr, int len, u_long *pv)
+{
+	int	cnt, c;
+	u_long	v;
+
+	for (v = 0, cnt = len; cnt > 0 && (c = *ptr++) && is_digit(c); cnt--) {
+		v = (v * 10) + digit_to_bin(c);
+	}
+
+	if (pv)
+		*pv = v;
+
+	return (len - cnt);
+}
+
+/* If ptr[0..len) begins with the keyword verb, return strlen(verb);
+** otherwise return 0. */
+static int is_keyword(char *ptr, int len, char *verb)
+{
+	int verb_len = strlen(verb);
+
+	if (len >= strlen(verb) && !memcmp(verb, ptr, verb_len))
+		return verb_len;
+	else
+		return 0;
+
+}
+
+/* Consume at least min_spaces of whitespace or fail with -EINVAL;
+** advances ptr/len in the caller's scope. */
+#define SKIP_SPACES(min_spaces)						\
+	if ((arg_len = skip_spaces(ptr, len)) < (min_spaces))		\
+		return -EINVAL;						\
+	ptr += arg_len; len -= arg_len;
+
+/* Consume a mandatory integer argument into v or fail with -EINVAL;
+** advances ptr/len in the caller's scope. */
+#define GET_INT_ARG(v)							\
+	if (!(arg_len = get_int_arg(ptr, len, &(v))))			\
+		return -EINVAL;						\
+	ptr += arg_len; len -= arg_len;
+
+/*
+** Parse a control command
+*/
+
+/* Parse a control command written to the proc file ("setsync",
+** "settags", "setdebug", ...) into np->user, then execute it via
+** ncr_usercmd() under the NCB lock. Returns length on success or
+** -EINVAL on any parse error. */
+static int ncr_user_command(ncb_p np, char *buffer, int length)
+{
+	char *ptr	= buffer;
+	int len		= length;
+	struct usrcmd	 *uc = &np->user;
+	int		  arg_len;
+	u_long 		  target;
+
+	bzero(uc, sizeof(*uc));
+
+	if (len > 0 && ptr[len-1] == '\n')
+		--len;
+
+	if	((arg_len = is_keyword(ptr, len, "setsync")) != 0)
+		uc->cmd = UC_SETSYNC;
+	else if	((arg_len = is_keyword(ptr, len, "settags")) != 0)
+		uc->cmd = UC_SETTAGS;
+	else if	((arg_len = is_keyword(ptr, len, "setorder")) != 0)
+		uc->cmd = UC_SETORDER;
+	else if	((arg_len = is_keyword(ptr, len, "setverbose")) != 0)
+		uc->cmd = UC_SETVERBOSE;
+	else if	((arg_len = is_keyword(ptr, len, "setwide")) != 0)
+		uc->cmd = UC_SETWIDE;
+	else if	((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
+		uc->cmd = UC_SETDEBUG;
+	else if	((arg_len = is_keyword(ptr, len, "setflag")) != 0)
+		uc->cmd = UC_SETFLAG;
+	else if	((arg_len = is_keyword(ptr, len, "resetdev")) != 0)
+		uc->cmd = UC_RESETDEV;
+	else if	((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
+		uc->cmd = UC_CLEARDEV;
+	else
+		arg_len = 0;
+
+#ifdef DEBUG_PROC_INFO
+printk("ncr_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
+#endif
+
+	if (!arg_len)
+		return -EINVAL;
+	ptr += arg_len; len -= arg_len;
+
+	/* First argument: target selection ("all" or a target number)
+	** for the commands that operate per-target. */
+	switch(uc->cmd) {
+	case UC_SETSYNC:
+	case UC_SETTAGS:
+	case UC_SETWIDE:
+	case UC_SETFLAG:
+	case UC_RESETDEV:
+	case UC_CLEARDEV:
+		SKIP_SPACES(1);
+		if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
+			ptr += arg_len; len -= arg_len;
+			uc->target = ~0;
+		} else {
+			GET_INT_ARG(target);
+			uc->target = (1<<target);
+#ifdef DEBUG_PROC_INFO
+printk("ncr_user_command: target=%ld\n", target);
+#endif
+		}
+		break;
+	}
+
+	/* Second argument: command-specific value or keyword list. */
+	switch(uc->cmd) {
+	case UC_SETVERBOSE:
+	case UC_SETSYNC:
+	case UC_SETTAGS:
+	case UC_SETWIDE:
+		SKIP_SPACES(1);
+		GET_INT_ARG(uc->data);
+#ifdef DEBUG_PROC_INFO
+printk("ncr_user_command: data=%ld\n", uc->data);
+#endif
+		break;
+	case UC_SETORDER:
+		SKIP_SPACES(1);
+		if	((arg_len = is_keyword(ptr, len, "simple")))
+			uc->data = M_SIMPLE_TAG;
+		else if	((arg_len = is_keyword(ptr, len, "ordered")))
+			uc->data = M_ORDERED_TAG;
+		else if	((arg_len = is_keyword(ptr, len, "default")))
+			uc->data = 0;
+		else
+			return -EINVAL;
+		break;
+	case UC_SETDEBUG:
+		while (len > 0) {
+			SKIP_SPACES(1);
+			if	((arg_len = is_keyword(ptr, len, "alloc")))
+				uc->data |= DEBUG_ALLOC;
+			else if	((arg_len = is_keyword(ptr, len, "phase")))
+				uc->data |= DEBUG_PHASE;
+			else if	((arg_len = is_keyword(ptr, len, "queue")))
+				uc->data |= DEBUG_QUEUE;
+			else if	((arg_len = is_keyword(ptr, len, "result")))
+				uc->data |= DEBUG_RESULT;
+			else if	((arg_len = is_keyword(ptr, len, "pointer")))
+				uc->data |= DEBUG_POINTER;
+			else if	((arg_len = is_keyword(ptr, len, "script")))
+				uc->data |= DEBUG_SCRIPT;
+			else if	((arg_len = is_keyword(ptr, len, "tiny")))
+				uc->data |= DEBUG_TINY;
+			else if	((arg_len = is_keyword(ptr, len, "timing")))
+				uc->data |= DEBUG_TIMING;
+			else if	((arg_len = is_keyword(ptr, len, "nego")))
+				uc->data |= DEBUG_NEGO;
+			else if	((arg_len = is_keyword(ptr, len, "tags")))
+				uc->data |= DEBUG_TAGS;
+			else
+				return -EINVAL;
+			ptr += arg_len; len -= arg_len;
+		}
+#ifdef DEBUG_PROC_INFO
+printk("ncr_user_command: data=%ld\n", uc->data);
+#endif
+		break;
+	case UC_SETFLAG:
+		while (len > 0) {
+			SKIP_SPACES(1);
+			if	((arg_len = is_keyword(ptr, len, "trace")))
+				uc->data |= UF_TRACE;
+			else if	((arg_len = is_keyword(ptr, len, "no_disc")))
+				uc->data |= UF_NODISC;
+			else
+				return -EINVAL;
+			ptr += arg_len; len -= arg_len;
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (len)
+		return -EINVAL;
+	else {
+		long flags;
+
+		NCR_LOCK_NCB(np, flags);
+		ncr_usercmd (np);
+		NCR_UNLOCK_NCB(np, flags);
+	}
+	return length;
+}
+
+#endif /* SCSI_NCR_USER_COMMAND_SUPPORT */
+
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+
+/*
+** State used to build /proc read output: formatted text is appended
+** at 'pos' into the caller-supplied 'buffer', skipping everything
+** before the reader-requested file 'offset'.
+*/
+struct info_str
+{
+	char *buffer;	/* destination buffer supplied by the caller */
+	int length;	/* capacity of 'buffer' */
+	int offset;	/* file offset requested by the reader */
+	int pos;	/* bytes of output generated so far */
+};
+
+/*
+** Append 'len' bytes of 'data' to the info buffer, honouring the read
+** offset: bytes wholly before info->offset are only counted, and the
+** copy is truncated at the buffer capacity.
+*/
+static void copy_mem_info(struct info_str *info, char *data, int len)
+{
+	/* Clamp to the space remaining in the destination buffer. */
+	if (info->pos + len > info->length)
+		len = info->length - info->pos;
+
+	/* Chunk lies entirely before the requested offset: just count it. */
+	if (info->pos + len < info->offset) {
+		info->pos += len;
+		return;
+	}
+	/* Chunk straddles the offset: drop the leading part. */
+	if (info->pos < info->offset) {
+		data += (info->offset - info->pos);
+		len -= (info->offset - info->pos);
+	}
+
+	if (len > 0) {
+		memcpy(info->buffer + info->pos, data, len);
+		info->pos += len;
+	}
+}
+
+/*
+** printf-like helper: format into a local buffer and hand the result
+** to copy_mem_info().  Returns the formatted length.
+**
+** NOTE(review): vsprintf() writes into a fixed 81-byte buffer with no
+** bound check, so every caller must keep one formatted line under 80
+** characters; vsnprintf() would be safer where available.
+*/
+static int copy_info(struct info_str *info, char *fmt, ...)
+{
+	va_list args;
+	char buf[81];
+	int len;
+
+	va_start(args, fmt);
+	len = vsprintf(buf, fmt, args);
+	va_end(args);
+
+	copy_mem_info(info, buf, len);
+	return len;
+}
+
+/*
+** Copy formatted information into the input buffer.
+*/
+
+/*
+** Fill 'ptr' with adapter information for a /proc read of 'len' bytes
+** at file offset 'offset'.  Returns the number of bytes produced past
+** the requested offset (0 if none).
+*/
+static int ncr_host_info(ncb_p np, char *ptr, off_t offset, int len)
+{
+	struct info_str info;
+#ifdef CONFIG_ALL_PPC
+	struct device_node* of_node;
+#endif
+
+	info.buffer = ptr;
+	info.length = len;
+	info.offset = offset;
+	info.pos = 0;
+
+	/* Chip identity and PCI location. */
+	copy_info(&info, "General information:\n");
+	copy_info(&info, " Chip " NAME53C "%s, device id 0x%x, "
+			 "revision id 0x%x\n",
+			 np->chip_name, np->device_id, np->revision_id);
+	copy_info(&info, " On PCI bus %d, device %d, function %d, "
+#ifdef __sparc__
+		"IRQ %s\n",
+#else
+		"IRQ %d\n",
+#endif
+		np->bus, (np->device_fn & 0xf8) >> 3, np->device_fn & 7,
+#ifdef __sparc__
+		__irq_itoa(np->irq));
+#else
+		(int) np->irq);
+#endif
+#ifdef CONFIG_ALL_PPC
+	/* On PowerPC, also report the OpenFirmware device path. */
+	of_node = find_pci_device_OFnode(np->bus, np->device_fn);
+	if (of_node && of_node->full_name)
+		copy_info(&info, "PPC OpenFirmware path : %s\n", of_node->full_name);
+#endif
+	copy_info(&info, " Synchronous period factor %d, "
+			 "max commands per lun %d\n",
+			 (int) np->minsync, MAX_TAGS);
+
+	if (driver_setup.debug || driver_setup.verbose > 1) {
+		copy_info(&info, " Debug flags 0x%x, verbosity level %d\n",
+			  driver_setup.debug, driver_setup.verbose);
+	}
+
+	/* Only bytes past the requested offset count as output. */
+	return info.pos > info.offset? info.pos - info.offset : 0;
+}
+
+#endif /* SCSI_NCR_USER_INFO_SUPPORT */
+
+/*
+** Entry point of the scsi proc fs of the driver.
+** - func = 0 means read (returns adapter infos)
+** - func = 1 means write (parse user control command)
+*/
+
+static int sym53c8xx_proc_info(char *buffer, char **start, off_t offset,
+			int length, int hostno, int func)
+{
+	struct Scsi_Host *host;
+	struct host_data *host_data;
+	ncb_p ncb = 0;
+	int retv;
+
+#ifdef DEBUG_PROC_INFO
+printk("sym53c8xx_proc_info: hostno=%d, func=%d\n", hostno, func);
+#endif
+
+	/* Find the controller block attached to the requested host number,
+	** skipping hosts that belong to another driver template.
+	*/
+	for (host = first_host; host; host = host->next) {
+		if (host->hostt != first_host->hostt)
+			continue;
+		if (host->host_no == hostno) {
+			host_data = (struct host_data *) host->hostdata;
+			ncb = host_data->ncb;
+			break;
+		}
+	}
+
+	if (!ncb)
+		return -EINVAL;
+
+	if (func) {
+		/* Write: parse a user control command. */
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+		retv = ncr_user_command(ncb, buffer, length);
+#else
+		retv = -EINVAL;
+#endif
+	}
+	else {
+		/* Read: return adapter information. */
+		if (start)
+			*start = buffer;
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+		retv = ncr_host_info(ncb, buffer, offset, length);
+#else
+		retv = -EINVAL;
+#endif
+	}
+
+	return retv;
+}
+
+
+/*=========================================================================
+** End of proc file system stuff
+**=========================================================================
+*/
+#endif
+
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+
+/*
+ * 24C16 EEPROM reading.
+ *
+ * GPIO0 - data in/data out
+ * GPIO1 - clock
+ * Symbios NVRAM wiring now also used by Tekram.
+ */
+
+#define SET_BIT 0
+#define CLR_BIT 1
+#define SET_CLK 2
+#define CLR_CLK 3
+
+/*
+ * Set/clear data/clock bit in GPIO0
+ */
+static void __init
+S24C16_set_bit(ncr_slot *np, u_char write_bit, u_char *gpreg, int bit_mode)
+{
+	UDELAY (5);	/* settle time before changing the GPIO lines */
+	switch (bit_mode){
+	case SET_BIT:
+		/* drive GPIO0 (data) from bit 0 of write_bit */
+		*gpreg |= write_bit;
+		break;
+	case CLR_BIT:
+		/* clear GPIO0 (data) */
+		*gpreg &= 0xfe;
+		break;
+	case SET_CLK:
+		/* raise GPIO1 (clock, mask 0x02) */
+		*gpreg |= 0x02;
+		break;
+	case CLR_CLK:
+		/* lower GPIO1 (clock) */
+		*gpreg &= 0xfd;
+		break;
+
+	}
+	OUTB (nc_gpreg, *gpreg);	/* write the new GPIO state to the chip */
+	UDELAY (5);
+}
+
+/*
+ * Send START condition to NVRAM to wake it up.
+ */
+static void __init S24C16_start(ncr_slot *np, u_char *gpreg)
+{
+	/* START condition: data falls while clock is high. */
+	S24C16_set_bit(np, 1, gpreg, SET_BIT);
+	S24C16_set_bit(np, 0, gpreg, SET_CLK);
+	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
+ */
+static void __init S24C16_stop(ncr_slot *np, u_char *gpreg)
+{
+	/* STOP condition: data rises while clock is high. */
+	S24C16_set_bit(np, 0, gpreg, SET_CLK);
+	S24C16_set_bit(np, 1, gpreg, SET_BIT);
+}
+
+/*
+ * Read or write a bit to the NVRAM,
+ * read if GPIO0 input else write if GPIO0 output
+ */
+static void __init
+S24C16_do_bit(ncr_slot *np, u_char *read_bit, u_char write_bit, u_char *gpreg)
+{
+	/* Present the data bit, pulse the clock, and (optionally) sample
+	** the GPIO register while the clock is high.
+	*/
+	S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
+	S24C16_set_bit(np, 0, gpreg, SET_CLK);
+	if (read_bit)
+		*read_bit = INB (nc_gpreg);	/* caller masks bit 0 (GPIO0) */
+	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+}
+
+/*
+ * Output an ACK to the NVRAM after reading,
+ * change GPIO0 to output and when done back to an input
+ */
+static void __init
+S24C16_write_ack(ncr_slot *np, u_char write_bit, u_char *gpreg, u_char *gpcntl)
+{
+	OUTB (nc_gpcntl, *gpcntl & 0xfe);	/* GPIO0 temporarily an output */
+	S24C16_do_bit(np, 0, write_bit, gpreg);	/* clock out the ACK bit */
+	OUTB (nc_gpcntl, *gpcntl);		/* restore previous direction */
+}
+
+/*
+ * Input an ACK from NVRAM after writing,
+ * change GPIO0 to input and when done back to an output
+ */
+static void __init
+S24C16_read_ack(ncr_slot *np, u_char *read_bit, u_char *gpreg, u_char *gpcntl)
+{
+	OUTB (nc_gpcntl, *gpcntl | 0x01);	/* GPIO0 temporarily an input */
+	S24C16_do_bit(np, read_bit, 1, gpreg);	/* clock in the ACK bit */
+	OUTB (nc_gpcntl, *gpcntl);		/* restore previous direction */
+}
+
+/*
+ * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
+ * GPIO0 must already be set as an output
+ */
+static void __init
+S24C16_write_byte(ncr_slot *np, u_char *ack_data, u_char write_data,
+		  u_char *gpreg, u_char *gpcntl)
+{
+	int x;
+
+	/* Shift the byte out MSB first, then collect the device's ACK. */
+	for (x = 0; x < 8; x++)
+		S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
+
+	S24C16_read_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * READ a byte from the NVRAM and then send an ACK to say we have got it,
+ * GPIO0 must already be set as an input
+ */
+static void __init
+S24C16_read_byte(ncr_slot *np, u_char *read_data, u_char ack_data,
+		 u_char *gpreg, u_char *gpcntl)
+{
+	int x;
+	u_char read_bit;
+
+	/* Shift the byte in MSB first; writing 1 releases the data line
+	** so the EEPROM can drive it.
+	*/
+	*read_data = 0;
+	for (x = 0; x < 8; x++) {
+		S24C16_do_bit(np, &read_bit, 1, gpreg);
+		*read_data |= ((read_bit & 0x01) << (7 - x));
+	}
+
+	/* ack_data != 0 (NAK) tells the device this was the last byte. */
+	S24C16_write_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * Read 'len' bytes starting at 'offset'.
+ */
+static int __init
+sym_read_S24C16_nvram (ncr_slot *np, int offset, u_char *data, int len)
+{
+	u_char gpcntl, gpreg;
+	u_char old_gpcntl, old_gpreg;
+	u_char ack_data;
+	int retv = 1;	/* assume failure until the full sequence completes */
+	int x;
+
+	/* save current state of GPCNTL and GPREG */
+	old_gpreg	= INB (nc_gpreg);
+	old_gpcntl	= INB (nc_gpcntl);
+	gpcntl		= old_gpcntl & 0xfc;	/* GPIO0/1 as outputs */
+
+	/* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
+	OUTB (nc_gpreg, old_gpreg);
+	OUTB (nc_gpcntl, gpcntl);
+
+	/* this is to set NVRAM into a known state with GPIO0/1 both low */
+	gpreg = old_gpreg;
+	S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
+	S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
+
+	/* now set NVRAM inactive with GPIO0/1 both high */
+	S24C16_stop(np, &gpreg);
+
+	/* activate NVRAM */
+	S24C16_start(np, &gpreg);
+
+	/* write device code and random address MSB */
+	S24C16_write_byte(np, &ack_data,
+		0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+	if (ack_data & 0x01)
+		goto out;	/* no ACK: device absent or busy */
+
+	/* write random address LSB */
+	S24C16_write_byte(np, &ack_data,
+		offset & 0xff, &gpreg, &gpcntl);
+	if (ack_data & 0x01)
+		goto out;
+
+	/* regenerate START state to set up for reading */
+	S24C16_start(np, &gpreg);
+
+	/* rewrite device code and address MSB with read bit set (lsb = 0x01) */
+	S24C16_write_byte(np, &ack_data,
+		0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+	if (ack_data & 0x01)
+		goto out;
+
+	/* now set up GPIO0 for inputting data */
+	gpcntl |= 0x01;
+	OUTB (nc_gpcntl, gpcntl);
+
+	/* input all requested data - only part of total NVRAM */
+	for (x = 0; x < len; x++)
+		S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);
+
+	/* finally put NVRAM back in inactive mode */
+	gpcntl &= 0xfe;
+	OUTB (nc_gpcntl, gpcntl);
+	S24C16_stop(np, &gpreg);
+	retv = 0;	/* success */
+out:
+	/* return GPIO0/1 to original states after having accessed NVRAM */
+	OUTB (nc_gpcntl, old_gpcntl);
+	OUTB (nc_gpreg, old_gpreg);
+
+	return retv;
+}
+
+#undef SET_BIT
+#undef CLR_BIT
+#undef SET_CLK
+#undef CLR_CLK
+
+/*
+ * Try reading Symbios NVRAM.
+ * Return 0 if OK.
+ */
+static int __init sym_read_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram)
+{
+	static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
+	u_char *data = (u_char *) nvram;
+	int len  = sizeof(*nvram);
+	u_short	csum;
+	int x;
+
+	/* probe the 24c16 and read the SYMBIOS 24c16 area */
+	if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
+		return 1;
+
+	/* check valid NVRAM signature, verify byte count and checksum */
+	if (nvram->type != 0 ||
+	    memcmp(nvram->trailer, Symbios_trailer, 6) ||
+	    nvram->byte_count != len - 12)
+		return 1;
+
+	/* verify checksum: simple byte sum over the payload between
+	** the 6-byte header and the 6-byte trailer
+	*/
+	for (x = 6, csum = 0; x < len - 6; x++)
+		csum += data[x];
+	if (csum != nvram->checksum)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * 93C46 EEPROM reading.
+ *
+ * GPIO0 - data in
+ * GPIO1 - data out
+ * GPIO2 - clock
+ * GPIO4 - chip select
+ *
+ * Used by Tekram.
+ */
+
+/*
+ * Pulse clock bit in GPIO0
+ */
+static void __init T93C46_Clk(ncr_slot *np, u_char *gpreg)
+{
+	/* Raise then lower the clock line (GPIO2, mask 0x04). */
+	OUTB (nc_gpreg, *gpreg | 0x04);
+	UDELAY (2);
+	OUTB (nc_gpreg, *gpreg);
+}
+
+/*
+ * Read bit from NVRAM
+ */
+static void __init T93C46_Read_Bit(ncr_slot *np, u_char *read_bit, u_char *gpreg)
+{
+	UDELAY (2);
+	T93C46_Clk(np, gpreg);
+	*read_bit = INB (nc_gpreg);	/* caller masks bit 0 (GPIO0 = data in) */
+}
+
+/*
+ * Write bit to GPIO0
+ */
+static void __init T93C46_Write_Bit(ncr_slot *np, u_char write_bit, u_char *gpreg)
+{
+	/* Drive the data-out line (GPIO1, mask 0x02) from bit 0. */
+	if (write_bit & 0x01)
+		*gpreg |= 0x02;
+	else
+		*gpreg &= 0xfd;
+
+	*gpreg |= 0x10;	/* keep chip select (GPIO4) asserted */
+
+	OUTB (nc_gpreg, *gpreg);
+	UDELAY (2);
+
+	T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
+ */
+static void __init T93C46_Stop(ncr_slot *np, u_char *gpreg)
+{
+	*gpreg &= 0xef;	/* drop chip select (GPIO4) */
+	OUTB (nc_gpreg, *gpreg);
+	UDELAY (2);
+
+	T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send read command and address to NVRAM
+ */
+static void __init
+T93C46_Send_Command(ncr_slot *np, u_short write_data,
+		    u_char *read_bit, u_char *gpreg)
+{
+	int x;
+
+	/* send 9 bits, start bit (1), command (2), address (6) */
+	for (x = 0; x < 9; x++)
+		T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
+
+	/* Sample GPIO state; caller checks bit 0 — presumably the
+	** EEPROM's dummy 0 bit that precedes read data.
+	*/
+	*read_bit = INB (nc_gpreg);
+}
+
+/*
+ * READ 2 bytes from the NVRAM
+ */
+static void __init
+T93C46_Read_Word(ncr_slot *np, u_short *nvram_data, u_char *gpreg)
+{
+	int x;
+	u_char read_bit;
+
+	/* Clock in 16 bits, MSB first. */
+	*nvram_data = 0;
+	for (x = 0; x < 16; x++) {
+		T93C46_Read_Bit(np, &read_bit, gpreg);
+
+		if (read_bit & 0x01)
+			*nvram_data |= (0x01 << (15 - x));
+		else
+			*nvram_data &= ~(0x01 << (15 - x));
+	}
+}
+
+/*
+ * Read Tekram NvRAM data.
+ */
+static int __init
+T93C46_Read_Data(ncr_slot *np, u_short *data,int len,u_char *gpreg)
+{
+	u_char	read_bit;
+	int	x;
+
+	for (x = 0; x < len; x++)  {
+
+		/* output read command and address:
+		** 0x180 = start bit + READ opcode, low 6 bits = word address
+		*/
+		T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
+		if (read_bit & 0x01)
+			return 1; /* Bad */
+		T93C46_Read_Word(np, &data[x], gpreg);
+		T93C46_Stop(np, gpreg);
+	}
+
+	return 0;
+}
+
+/*
+ * Try reading 93C46 Tekram NVRAM.
+ */
+static int __init
+sym_read_T93C46_nvram (ncr_slot *np, Tekram_nvram *nvram)
+{
+	u_char gpcntl, gpreg;
+	u_char old_gpcntl, old_gpreg;
+	int retv = 1;
+
+	/* save current state of GPCNTL and GPREG */
+	old_gpreg	= INB (nc_gpreg);
+	old_gpcntl	= INB (nc_gpcntl);
+
+	/* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in,
+	   1/2/4 out */
+	gpreg = old_gpreg & 0xe9;
+	OUTB (nc_gpreg, gpreg);
+	gpcntl = (old_gpcntl & 0xe9) | 0x09;
+	OUTB (nc_gpcntl, gpcntl);
+
+	/* input all of NVRAM, 64 words */
+	retv = T93C46_Read_Data(np, (u_short *) nvram,
+				sizeof(*nvram) / sizeof(short), &gpreg);
+
+	/* return GPIO0/1/2/4 to original states after having accessed NVRAM */
+	OUTB (nc_gpcntl, old_gpcntl);
+	OUTB (nc_gpreg, old_gpreg);
+
+	return retv;	/* 0 on success, 1 if the EEPROM did not respond */
+}
+
+/*
+ * Try reading Tekram NVRAM.
+ * Return 0 if OK.
+ */
+static int __init
+sym_read_Tekram_nvram (ncr_slot *np, u_short device_id, Tekram_nvram *nvram)
+{
+	u_char *data = (u_char *) nvram;
+	int len = sizeof(*nvram);
+	u_short	csum;
+	int x;
+
+	/* Select the EEPROM type wired on this board. */
+	switch (device_id) {
+	case PCI_DEVICE_ID_NCR_53C885:
+	case PCI_DEVICE_ID_NCR_53C895:
+	case PCI_DEVICE_ID_NCR_53C896:
+		x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+					  data, len);
+		break;
+	case PCI_DEVICE_ID_NCR_53C875:
+		x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+					  data, len);
+		if (!x)
+			break;
+		/* fall through: 875 boards without a 24c16 use a 93C46 */
+	default:
+		x = sym_read_T93C46_nvram(np, nvram);
+		break;
+	}
+	if (x)
+		return 1;
+
+	/* verify checksum: little-endian 16-bit word sum must be 0x1234 */
+	for (x = 0, csum = 0; x < len - 1; x += 2)
+		csum += data[x] + (data[x+1] << 8);
+	if (csum != 0x1234)
+		return 1;
+
+	return 0;
+}
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Module stuff
+*/
+
+#ifdef MODULE
+Scsi_Host_Template driver_template = SYM53C8XX;
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/sym53c8xx.h b/linux/src/drivers/scsi/sym53c8xx.h
new file mode 100644
index 00000000..128fe165
--- /dev/null
+++ b/linux/src/drivers/scsi/sym53c8xx.h
@@ -0,0 +1,116 @@
+/******************************************************************************
+** High Performance device driver for the Symbios 53C896 controller.
+**
+** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+**
+** This driver also supports all the Symbios 53C8XX controller family,
+** except 53C810 revisions < 16, 53C825 revisions < 16 and all
+** revisions of 53C815 controllers.
+**
+** This driver is based on the Linux port of the FreeBSD ncr driver.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** The Linux port of the FreeBSD ncr driver has been achieved in
+** november 1995 by:
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+**-----------------------------------------------------------------------------
+**
+** Major contributions:
+** --------------------
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+*******************************************************************************
+*/
+
+#ifndef SYM53C8XX_H
+#define SYM53C8XX_H
+
+#include "sym53c8xx_defs.h"
+
+/*
+** Define Scsi_Host_Template parameters
+**
+** Used by hosts.c and sym53c8xx.c with module configuration.
+*/
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#include <scsi/scsicam.h>
+
+int sym53c8xx_abort(Scsi_Cmnd *);
+int sym53c8xx_detect(Scsi_Host_Template *tpnt);
+const char *sym53c8xx_info(struct Scsi_Host *host);
+int sym53c8xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int sym53c8xx_reset(Scsi_Cmnd *, unsigned int);
+
+#ifdef MODULE
+int sym53c8xx_release(struct Scsi_Host *);
+#else
+#define sym53c8xx_release NULL
+#endif
+
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,75)
+
+#define SYM53C8XX { name: "", \
+ detect: sym53c8xx_detect, \
+ release: sym53c8xx_release, \
+ info: sym53c8xx_info, \
+ queuecommand: sym53c8xx_queue_command,\
+ abort: sym53c8xx_abort, \
+ reset: sym53c8xx_reset, \
+ bios_param: scsicam_bios_param, \
+ can_queue: SCSI_NCR_CAN_QUEUE, \
+ this_id: 7, \
+ sg_tablesize: SCSI_NCR_SG_TABLESIZE, \
+ cmd_per_lun: SCSI_NCR_CMD_PER_LUN, \
+ use_clustering: DISABLE_CLUSTERING}
+
+#else
+
+#define SYM53C8XX { NULL, NULL, NULL, NULL, \
+ NULL, sym53c8xx_detect, \
+ sym53c8xx_release, sym53c8xx_info, NULL, \
+ sym53c8xx_queue_command,sym53c8xx_abort, \
+ sym53c8xx_reset, NULL, scsicam_bios_param, \
+ SCSI_NCR_CAN_QUEUE, 7, \
+ SCSI_NCR_SG_TABLESIZE, SCSI_NCR_CMD_PER_LUN, \
+ 0, 0, DISABLE_CLUSTERING}
+
+#endif /* LINUX_VERSION_CODE */
+
+#endif /* defined(HOSTS_C) || defined(MODULE) */
+
+#endif /* SYM53C8XX_H */
diff --git a/linux/src/drivers/scsi/sym53c8xx_comm.h b/linux/src/drivers/scsi/sym53c8xx_comm.h
new file mode 100644
index 00000000..ba961db0
--- /dev/null
+++ b/linux/src/drivers/scsi/sym53c8xx_comm.h
@@ -0,0 +1,2717 @@
+/******************************************************************************
+** High Performance device driver for the Symbios 53C896 controller.
+**
+** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+**
+** This driver also supports all the Symbios 53C8XX controller family,
+** except 53C810 revisions < 16, 53C825 revisions < 16 and all
+** revisions of 53C815 controllers.
+**
+** This driver is based on the Linux port of the FreeBSD ncr driver.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** The Linux port of the FreeBSD ncr driver has been achieved in
+** november 1995 by:
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+**-----------------------------------------------------------------------------
+**
+** Major contributions:
+** --------------------
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+*******************************************************************************
+*/
+
+/*
+** This file contains definitions and code that the
+** sym53c8xx and ncr53c8xx drivers should share.
+** The sharing will be achieved in a further version
+** of the driver bundle. For now, only the ncr53c8xx
+** driver includes this file.
+*/
+
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+
+/*==========================================================
+**
+** Hmmm... What complex some PCI-HOST bridges actually
+** are, despite the fact that the PCI specifications
+** are looking so smart and simple! ;-)
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,47)
+#define SCSI_NCR_DYNAMIC_DMA_MAPPING
+#endif
+
+/*==========================================================
+**
+** Miscellaneous defines.
+**
+**==========================================================
+*/
+
+#define u_char unsigned char
+#define u_short unsigned short
+#define u_int unsigned int
+#define u_long unsigned long
+
+#ifndef bcopy
+#define bcopy(s, d, n) memcpy((d), (s), (n))
+#endif
+
+#ifndef bcmp
+#define bcmp(s, d, n) memcmp((d), (s), (n))
+#endif
+
+#ifndef bzero
+#define bzero(d, n) memset((d), 0, (n))
+#endif
+
+#ifndef offsetof
+#define offsetof(t, m) ((size_t) (&((t *)0)->m))
+#endif
+
+/*==========================================================
+**
+** assert ()
+**
+**==========================================================
+**
+** modified copy from 386bsd:/usr/include/sys/assert.h
+**
+**----------------------------------------------------------
+*/
+
+#define assert(expression) { \
+ if (!(expression)) { \
+ (void)panic( \
+ "assertion \"%s\" failed: file \"%s\", line %d\n", \
+ #expression, \
+ __FILE__, __LINE__); \
+ } \
+}
+
+/*==========================================================
+**
+** Debugging tags
+**
+**==========================================================
+*/
+
+#define DEBUG_ALLOC (0x0001)
+#define DEBUG_PHASE (0x0002)
+#define DEBUG_QUEUE (0x0008)
+#define DEBUG_RESULT (0x0010)
+#define DEBUG_POINTER (0x0020)
+#define DEBUG_SCRIPT (0x0040)
+#define DEBUG_TINY (0x0080)
+#define DEBUG_TIMING (0x0100)
+#define DEBUG_NEGO (0x0200)
+#define DEBUG_TAGS (0x0400)
+#define DEBUG_SCATTER (0x0800)
+#define DEBUG_IC (0x1000)
+
+/*
+** Enable/Disable debug messages.
+** Can be changed at runtime too.
+*/
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
+ #define DEBUG_FLAGS ncr_debug
+#else
+ #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
+#endif
+
+/*==========================================================
+**
+** A la VMS/CAM-3 queue management.
+** Implemented from linux list management.
+**
+**==========================================================
+*/
+
+typedef struct xpt_quehead {
+ struct xpt_quehead *flink; /* Forward pointer */
+ struct xpt_quehead *blink; /* Backward pointer */
+} XPT_QUEHEAD;
+
+#define xpt_que_init(ptr) do { \
+ (ptr)->flink = (ptr); (ptr)->blink = (ptr); \
+} while (0)
+
+/* Link 'new' between the adjacent entries 'blink' and 'flink'. */
+static inline void __xpt_que_add(struct xpt_quehead * new,
+	struct xpt_quehead * blink,
+	struct xpt_quehead * flink)
+{
+	flink->blink = new;
+	new->flink = flink;
+	new->blink = blink;
+	blink->flink = new;
+}
+
+/* Unlink the entry sitting between 'blink' and 'flink'. */
+static inline void __xpt_que_del(struct xpt_quehead * blink,
+	struct xpt_quehead * flink)
+{
+	flink->blink = blink;
+	blink->flink = flink;
+}
+
+/* A queue is empty when its head points back to itself. */
+static inline int xpt_que_empty(struct xpt_quehead *head)
+{
+	return head->flink == head;
+}
+
+/*
+** Insert all elements of 'list' at the front of 'head'.
+** 'list' itself is not re-initialized by this routine.
+*/
+static inline void xpt_que_splice(struct xpt_quehead *list,
+	struct xpt_quehead *head)
+{
+	struct xpt_quehead *first = list->flink;
+
+	if (first != list) {	/* nothing to do for an empty list */
+		struct xpt_quehead *last = list->blink;
+		struct xpt_quehead *at   = head->flink;
+
+		first->blink = head;
+		head->flink  = first;
+
+		last->flink = at;
+		at->blink   = last;
+	}
+}
+
+#define xpt_que_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+
+
+#define xpt_insque(new, pos) __xpt_que_add(new, pos, (pos)->flink)
+
+#define xpt_remque(el) __xpt_que_del((el)->blink, (el)->flink)
+
+#define xpt_insque_head(new, head) __xpt_que_add(new, head, (head)->flink)
+
+/* Unlink and return the first element of the queue, or 0 if empty. */
+static inline struct xpt_quehead *xpt_remque_head(struct xpt_quehead *head)
+{
+	struct xpt_quehead *elem = head->flink;
+
+	if (elem != head)
+		__xpt_que_del(head, elem->flink);
+	else
+		elem = 0;
+	return elem;
+}
+
+#define xpt_insque_tail(new, head) __xpt_que_add(new, (head)->blink, head)
+
+/* Unlink and return the last element of the queue, or 0 if empty. */
+static inline struct xpt_quehead *xpt_remque_tail(struct xpt_quehead *head)
+{
+	struct xpt_quehead *elem = head->blink;
+
+	if (elem != head)
+		__xpt_que_del(elem->blink, head);
+	else
+		elem = 0;
+	return elem;
+}
+
+/*==========================================================
+**
+** Simple Wrapper to kernel PCI bus interface.
+**
+** This wrapper allows to get rid of old kernel PCI
+** interface and still allows to preserve linux-2.0
+** compatibility. In fact, it is mostly an incomplete
+** emulation of the new PCI code for pre-2.2 kernels.
+** When kernel-2.0 support will be dropped, we will
+** just have to remove most of this code.
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0)
+
+typedef struct pci_dev *pcidev_t;
+#define PCIDEV_NULL (0)
+#define PciBusNumber(d) (d)->bus->number
+#define PciDeviceFn(d) (d)->devfn
+#define PciVendorId(d) (d)->vendor
+#define PciDeviceId(d) (d)->device
+#define PciIrqLine(d) (d)->irq
+
+#if LINUX_VERSION_CODE > LinuxVersionCode(2,3,12)
+
+/*
+** Fetch one BAR from the 2.3.13+ resource table and return the index
+** of the next BAR; flags 0x4 marks a 64-bit memory BAR, which occupies
+** two slots.
+*/
+static int __init
+pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+{
+	*base = pdev->resource[index].start;
+	if ((pdev->resource[index].flags & 0x7) == 0x4)
+		++index;
+	return ++index;
+}
+#else
+/*
+** Same as above for kernels using the older base_address[] array;
+** a value with low bits 0x4 is a 64-bit memory BAR spanning two slots.
+*/
+static int __init
+pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+{
+	*base = pdev->base_address[index++];
+	if ((*base & 0x7) == 0x4) {
+#if BITS_PER_LONG > 32
+		/* merge the upper 32 bits on 64-bit platforms */
+		*base |= (((u_long)pdev->base_address[index]) << 32);
+#endif
+		++index;
+	}
+	return index;	/* index of the next BAR to read */
+}
+#endif
+
+#else /* Incomplete emulation of current PCI code for pre-2.2 kernels */
+
+typedef unsigned int pcidev_t;
+#define PCIDEV_NULL (~0u)
+#define PciBusNumber(d) ((d)>>8)
+#define PciDeviceFn(d) ((d)&0xff)
+#define __PciDev(busn, devfn) (((busn)<<8)+(devfn))
+
+#define pci_present pcibios_present
+
+#define pci_read_config_byte(d, w, v) \
+ pcibios_read_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_read_config_word(d, w, v) \
+ pcibios_read_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_read_config_dword(d, w, v) \
+ pcibios_read_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
+
+#define pci_write_config_byte(d, w, v) \
+ pcibios_write_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_write_config_word(d, w, v) \
+ pcibios_write_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_write_config_dword(d, w, v) \
+ pcibios_write_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
+
+/*
+** Emulate the 2.2 pci_find_device() on top of the old pcibios
+** interface.  The static index means only one sequential scan may be
+** in progress at a time; fine here since it is only used at init time.
+*/
+static pcidev_t __init
+pci_find_device(unsigned int vendor, unsigned int device, pcidev_t prev)
+{
+	static unsigned short pci_index;
+	int retv;
+	unsigned char bus_number, device_fn;
+
+	if (prev == PCIDEV_NULL)
+		pci_index = 0;	/* new scan starts from the beginning */
+	else
+		++pci_index;
+	retv = pcibios_find_device (vendor, device, pci_index,
+				    &bus_number, &device_fn);
+	return retv ? PCIDEV_NULL : __PciDev(bus_number, device_fn);
+}
+
+/* Read the PCI vendor id from configuration space (pre-2.2 kernels). */
+static u_short __init PciVendorId(pcidev_t dev)
+{
+	u_short vendor_id;
+	pci_read_config_word(dev, PCI_VENDOR_ID, &vendor_id);
+	return vendor_id;
+}
+
+/* Read the PCI device id from configuration space (pre-2.2 kernels). */
+static u_short __init PciDeviceId(pcidev_t dev)
+{
+	u_short device_id;
+	pci_read_config_word(dev, PCI_DEVICE_ID, &device_id);
+	return device_id;
+}
+
+/* Read the interrupt line from configuration space (pre-2.2 kernels). */
+static u_int __init PciIrqLine(pcidev_t dev)
+{
+	u_char irq;
+	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+	return irq;
+}
+
+/*
+** Pre-2.2 variant: read one BAR directly from configuration space at
+** PCI_BASE_ADDRESS_0 + offset; return the offset of the next BAR.
+*/
+static int __init
+pci_get_base_address(pcidev_t dev, int offset, u_long *base)
+{
+	u_int32 tmp;
+
+	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
+	*base = tmp;
+	offset += sizeof(u_int32);
+	if ((tmp & 0x7) == 0x4) {	/* 64-bit memory BAR: two dwords */
+#if BITS_PER_LONG > 32
+		pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
+		*base |= (((u_long)tmp) << 32);
+#endif
+		offset += sizeof(u_int32);
+	}
+	return offset;
+}
+
+#endif /* LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0) */
+
+/*==========================================================
+**
+** SMP threading.
+**
+** Assuming that SMP systems are generally high end
+** systems and may use several SCSI adapters, we are
+** using one lock per controller instead of some global
+** one. For the moment (linux-2.1.95), driver's entry
+** points are called with the 'io_request_lock' lock
+** held, so:
+** - We are uselessly losing a couple of micro-seconds
+** to lock the controller data structure.
+** - But the driver is not broken by design for SMP and
+** so can be more resistant to bugs or bad changes in
+** the IO sub-system code.
+** - A small advantage could be that the interrupt code
+** is grained as wished (e.g.: by controller).
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+spinlock_t DRIVER_SMP_LOCK = SPIN_LOCK_UNLOCKED;
+#define NCR_LOCK_DRIVER(flags) spin_lock_irqsave(&DRIVER_SMP_LOCK, flags)
+#define NCR_UNLOCK_DRIVER(flags) \
+ spin_unlock_irqrestore(&DRIVER_SMP_LOCK, flags)
+
+#define NCR_INIT_LOCK_NCB(np) spin_lock_init(&np->smp_lock)
+#define NCR_LOCK_NCB(np, flags) spin_lock_irqsave(&np->smp_lock, flags)
+#define NCR_UNLOCK_NCB(np, flags) spin_unlock_irqrestore(&np->smp_lock, flags)
+
+#define NCR_LOCK_SCSI_DONE(np, flags) \
+ spin_lock_irqsave(&io_request_lock, flags)
+#define NCR_UNLOCK_SCSI_DONE(np, flags) \
+ spin_unlock_irqrestore(&io_request_lock, flags)
+
+#else
+
+#define NCR_LOCK_DRIVER(flags) do { save_flags(flags); cli(); } while (0)
+#define NCR_UNLOCK_DRIVER(flags) do { restore_flags(flags); } while (0)
+
+#define NCR_INIT_LOCK_NCB(np) do { } while (0)
+#define NCR_LOCK_NCB(np, flags) do { save_flags(flags); cli(); } while (0)
+#define NCR_UNLOCK_NCB(np, flags) do { restore_flags(flags); } while (0)
+
+#define NCR_LOCK_SCSI_DONE(np, flags) do {;} while (0)
+#define NCR_UNLOCK_SCSI_DONE(np, flags) do {;} while (0)
+
+#endif
+
+/*==========================================================
+**
+** Memory mapped IO
+**
+** Since linux-2.1, we must use ioremap() to map the io
+** memory space and iounmap() to unmap it. This allows
+** portability. Linux 1.3.X and 2.0.X allow to remap
+** physical pages addresses greater than the highest
+** physical memory address to kernel virtual pages with
+** vremap() / vfree(). That was not portable but worked
+** with i386 architecture.
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+#define ioremap vremap
+#define iounmap vfree
+#endif
+
+#ifdef __sparc__
+# include <asm/irq.h>
+# define pcivtobus(p) bus_dvma_to_mem(p)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#elif defined(__alpha__)
+# define pcivtobus(p) ((p) & 0xfffffffful)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#else /* others */
+# define pcivtobus(p) (p)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#endif
+
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+/*
+ * Map 'size' bytes of PCI memory space at bus address 'base' into
+ * kernel virtual space.  The mapping is made on whole pages; the
+ * in-page offset of 'base' is added back to the returned virtual
+ * address.  Returns 0UL if ioremap() failed.
+ */
+static u_long __init remap_pci_mem(u_long base, u_long size)
+{
+	u_long page_base = ((u_long) base) & PAGE_MASK;
+	u_long page_offs = ((u_long) base) - page_base;
+	u_long page_remapped = (u_long) ioremap(page_base, page_offs+size);
+
+	return page_remapped? (page_remapped + page_offs) : 0UL;
+}
+
+/*
+ * Undo remap_pci_mem(): unmap the page-aligned region containing
+ * 'vaddr'.  A zero 'vaddr' (failed mapping) is ignored.  'size' is
+ * accepted for symmetry but not needed by iounmap().
+ */
+static void __init unmap_pci_mem(u_long vaddr, u_long size)
+{
+	if (vaddr)
+		iounmap((void *) (vaddr & PAGE_MASK));
+}
+
+#endif /* not def SCSI_NCR_PCI_MEM_NOT_SUPPORTED */
+
+/*==========================================================
+**
+** Insert a delay in micro-seconds and milli-seconds.
+**
+** Under Linux, udelay() is restricted to delay <
+** 1 milli-second. In fact, it generally works for up
+** to 1 second delay. Since 2.1.105, the mdelay() function
+** is provided for delays in milli-seconds.
+** Under 2.0 kernels, udelay() is an inline function
+** that is very inaccurate on Pentium processors.
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,105)
+#define UDELAY udelay
+#define MDELAY mdelay
+#else
+static void UDELAY(long us) { udelay(us); }
+static void MDELAY(long ms) { while (ms--) UDELAY(1000); }
+#endif
+
+/*==========================================================
+**
+** Simple power of two buddy-like allocator.
+**
+** This simple code is not intended to be fast, but to
+** provide power of 2 aligned memory allocations.
+** Since the SCRIPTS processor only supplies 8 bit
+** arithmetic, this allocator allows simple and fast
+** address calculations from the SCRIPTS code.
+** In addition, cache line alignment is guaranteed for
+** power of 2 cache line size.
+** Enhanced in linux-2.3.44 to provide a memory pool
+** per pcidev to support dynamic dma mapping. (I would
+** have preferred a real bus abstraction, btw).
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+#define __GetFreePages(flags, order) __get_free_pages(flags, order)
+#else
+#define __GetFreePages(flags, order) __get_free_pages(flags, order, 0)
+#endif
+
+#define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
+#if PAGE_SIZE >= 8192
+#define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
+#else
+#define MEMO_PAGE_ORDER 1 /* 2 PAGES maximum */
+#endif
+#define MEMO_FREE_UNUSED /* Free unused pages immediately */
+#define MEMO_WARN 1
+#define MEMO_GFP_FLAGS GFP_ATOMIC
+#define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
+#define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
+#define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
+
+typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */
+typedef pcidev_t m_bush_t; /* Something that addresses DMAable */
+
+typedef struct m_link { /* Link between free memory chunks */
+ struct m_link *next;
+} m_link_s;
+
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+typedef struct m_vtob { /* Virtual to Bus address translation */
+ struct m_vtob *next;
+ m_addr_t vaddr;
+ m_addr_t baddr;
+} m_vtob_s;
+#define VTOB_HASH_SHIFT 5
+#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
+#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
+#define VTOB_HASH_CODE(m) \
+ ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
+#endif
+
+typedef struct m_pool { /* Memory pool of a given kind */
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+ m_bush_t bush;
+ m_addr_t (*getp)(struct m_pool *);
+ void (*freep)(struct m_pool *, m_addr_t);
+#define M_GETP() mp->getp(mp)
+#define M_FREEP(p) mp->freep(mp, p)
+#define GetPages() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
+#define FreePages(p) free_pages(p, MEMO_PAGE_ORDER)
+ int nump;
+ m_vtob_s *(vtob[VTOB_HASH_SIZE]);
+ struct m_pool *next;
+#else
+#define M_GETP() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
+#define M_FREEP(p) free_pages(p, MEMO_PAGE_ORDER)
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+ struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];
+} m_pool_s;
+
+/*
+ * Carve a chunk out of pool 'mp'.  'size' is rounded up to the next
+ * power of 2 (minimum 1 << MEMO_SHIFT); the returned chunk is aligned
+ * on its own size.  Returns 0 when 'size' exceeds one cluster
+ * (PAGE_SIZE << MEMO_PAGE_ORDER) or no memory is available.
+ */
+static void *___m_alloc(m_pool_s *mp, int size)
+{
+	int i = 0;
+	int s = (1 << MEMO_SHIFT);	/* smallest chunk size */
+	int j;
+	m_addr_t a;
+	m_link_s *h = mp->h;		/* free lists, one per chunk order */
+
+	if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+		return 0;
+
+	/* Round 'size' up to a power of 2; 'i' is the matching order. */
+	while (size > s) {
+		s <<= 1;
+		++i;
+	}
+
+	/*
+	 * Find the first non-empty free list of order >= i, pulling a
+	 * fresh cluster from the pool when every list is empty.
+	 */
+	j = i;
+	while (!h[j].next) {
+		if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+			h[j].next = (m_link_s *) M_GETP();
+			if (h[j].next)
+				h[j].next->next = 0;
+			break;
+		}
+		++j;
+		s <<= 1;
+	}
+	a = (m_addr_t) h[j].next;
+	if (a) {
+		h[j].next = h[j].next->next;
+		/*
+		 * Split the chunk down to the requested order, putting
+		 * the unused upper halves back on the free lists.
+		 */
+		while (j > i) {
+			j -= 1;
+			s >>= 1;
+			h[j].next = (m_link_s *) (a+s);
+			h[j].next->next = 0;
+		}
+	}
+#ifdef DEBUG
+	printk("___m_alloc(%d) = %p\n", size, (void *) a);
+#endif
+	return (void *) a;
+}
+
+/*
+ * Give a chunk obtained from ___m_alloc() back to pool 'mp'.
+ * 'size' must be the size requested at allocation time.  Free buddies
+ * are coalesced; when MEMO_FREE_UNUSED is defined a fully-free
+ * cluster is returned to the pool immediately.
+ */
+static void ___m_free(m_pool_s *mp, void *ptr, int size)
+{
+	int i = 0;
+	int s = (1 << MEMO_SHIFT);
+	m_link_s *q;
+	m_addr_t a, b;
+	m_link_s *h = mp->h;
+
+#ifdef DEBUG
+	printk("___m_free(%p, %d)\n", ptr, size);
+#endif
+
+	if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+		return;
+
+	/* Recover the allocation order 'i' from 'size'. */
+	while (size > s) {
+		s <<= 1;
+		++i;
+	}
+
+	a = (m_addr_t) ptr;
+
+	while (1) {
+#ifdef MEMO_FREE_UNUSED
+		/* A whole cluster is free again: hand it back. */
+		if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+			M_FREEP(a);
+			break;
+		}
+#endif
+		b = a ^ s;	/* address of the buddy chunk */
+		q = &h[i];
+		/* Look for the buddy on the free list of this order. */
+		while (q->next && q->next != (m_link_s *) b) {
+			q = q->next;
+		}
+		if (!q->next) {
+			/* Buddy still in use: just chain the chunk. */
+			((m_link_s *) a)->next = h[i].next;
+			h[i].next = (m_link_s *) a;
+			break;
+		}
+		/* Buddy is free: unlink it and retry one order up. */
+		q->next = q->next->next;
+		a = a & b;
+		s <<= 1;
+		++i;
+	}
+}
+
+/*
+ * Allocate 'size' bytes from pool 'mp' and zero them.  'name' tags
+ * the allocation in debug/warning messages.  A failure is reported
+ * with printk when 'uflags' contains MEMO_WARN.  Returns 0 on
+ * failure.
+ */
+static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags)
+{
+	void *p;
+
+	p = ___m_alloc(mp, size);
+
+	if (DEBUG_FLAGS & DEBUG_ALLOC)
+		printk ("new %-10s[%4d] @%p.\n", name, size, p);
+
+	if (p)
+		bzero(p, size);
+	else if (uflags & MEMO_WARN)
+		printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size);
+
+	return p;
+}
+
+#define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN)
+
+/*
+ * Free a chunk allocated with __m_calloc2() from pool 'mp'.
+ * 'name' is only used for the optional debug trace.
+ */
+static void __m_free(m_pool_s *mp, void *ptr, int size, char *name)
+{
+	if (DEBUG_FLAGS & DEBUG_ALLOC)
+		printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
+
+	___m_free(mp, ptr, size);
+
+}
+
+/*
+ * With pci bus iommu support, we use a default pool of unmapped memory
+ * for memory we do not need to DMA from/to and one pool per pcidev for
+ * memory accessed by the PCI chip. `mp0' is the default not DMAable pool.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+static m_pool_s mp0;
+
+#else
+
+/*
+ * Get one cluster of free pages for the default pool, counting it in
+ * mp->nump on success.
+ */
+static m_addr_t ___mp0_getp(m_pool_s *mp)
+{
+	m_addr_t m = GetPages();
+	if (m)
+		++mp->nump;
+	return m;
+}
+
+/*
+ * Release one cluster back to the page allocator and decrement the
+ * default pool's cluster count.
+ */
+static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
+{
+	FreePages(m);
+	--mp->nump;
+}
+
+static m_pool_s mp0 = {0, ___mp0_getp, ___mp0_freep};
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+/*
+ * Allocate and zero 'size' bytes from the default (not DMAable)
+ * pool, under the driver lock.
+ */
+static void *m_calloc(int size, char *name)
+{
+	u_long flags;
+	void *m;
+	NCR_LOCK_DRIVER(flags);
+	m = __m_calloc(&mp0, size, name);
+	NCR_UNLOCK_DRIVER(flags);
+	return m;
+}
+
+/*
+ * Free memory obtained with m_calloc(), under the driver lock.
+ */
+static void m_free(void *ptr, int size, char *name)
+{
+	u_long flags;
+	NCR_LOCK_DRIVER(flags);
+	__m_free(&mp0, ptr, size, name);
+	NCR_UNLOCK_DRIVER(flags);
+}
+
+/*
+ * DMAable pools.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+/* Without pci bus iommu support, all the memory is assumed DMAable */
+
+#define __m_calloc_dma(b, s, n) m_calloc(s, n)
+#define __m_free_dma(b, p, s, n) m_free(p, s, n)
+#define __vtobus(b, p) virt_to_bus(p)
+
+#else
+
+/*
+ * With pci bus iommu support, we maintain one pool per pcidev and a
+ * hashed reverse table for virtual to bus physical address translations.
+ */
+/*
+ * Get one DMA-consistent cluster for a per-device pool via
+ * pci_alloc_consistent(), and record its virtual-to-bus translation
+ * in the pool's hashed reverse table (mp->vtob).  Returns the
+ * virtual address, or 0 on failure (any partial allocation is
+ * released).
+ */
+static m_addr_t ___dma_getp(m_pool_s *mp)
+{
+	m_addr_t vp;
+	m_vtob_s *vbp;
+
+	vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
+	if (vbp) {
+		dma_addr_t daddr;
+		vp = (m_addr_t) pci_alloc_consistent(mp->bush,
+						PAGE_SIZE<<MEMO_PAGE_ORDER,
+						&daddr);
+		if (vp) {
+			int hc = VTOB_HASH_CODE(vp);
+			vbp->vaddr = vp;
+			vbp->baddr = daddr;
+			vbp->next = mp->vtob[hc];
+			mp->vtob[hc] = vbp;
+			++mp->nump;
+			return vp;
+		}
+	}
+	/* pci_alloc_consistent() failed: drop the translation entry. */
+	if (vbp)
+		__m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+	return 0;
+}
+
+/*
+ * Release a DMA-consistent cluster obtained from ___dma_getp():
+ * unlink its translation entry from the hash, free the consistent
+ * memory and the entry itself.  Unknown addresses are ignored.
+ */
+static void ___dma_freep(m_pool_s *mp, m_addr_t m)
+{
+	m_vtob_s **vbpp, *vbp;
+	int hc = VTOB_HASH_CODE(m);
+
+	vbpp = &mp->vtob[hc];
+	while (*vbpp && (*vbpp)->vaddr != m)
+		vbpp = &(*vbpp)->next;
+	if (*vbpp) {
+		vbp = *vbpp;
+		*vbpp = (*vbpp)->next;
+		pci_free_consistent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
+				(void *)vbp->vaddr, (dma_addr_t)vbp->baddr);
+		__m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+		--mp->nump;
+	}
+}
+
+/*
+ * Find the DMAable memory pool attached to pci device 'bush'.
+ * Returns the list terminator (0) if no pool exists yet.
+ */
+static inline m_pool_s *___get_dma_pool(m_bush_t bush)
+{
+	m_pool_s *mp;
+	for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next);
+	return mp;
+}
+
+/*
+ * Create a DMAable pool for pci device 'bush' and link it at the
+ * head of the pool list (mp0.next).  Returns 0 on allocation
+ * failure.
+ */
+static m_pool_s *___cre_dma_pool(m_bush_t bush)
+{
+	m_pool_s *mp;
+	mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL");
+	if (mp) {
+		bzero(mp, sizeof(*mp));
+		mp->bush = bush;
+		mp->getp = ___dma_getp;
+		mp->freep = ___dma_freep;
+		mp->next = mp0.next;
+		mp0.next = mp;
+	}
+	return mp;
+}
+
+/*
+ * Unlink pool 'p' from the pool list and free its descriptor.
+ * Does nothing if 'p' is not on the list.
+ */
+static void ___del_dma_pool(m_pool_s *p)
+{
+	struct m_pool **pp = &mp0.next;
+
+	while (*pp && *pp != p)
+		pp = &(*pp)->next;
+	if (*pp) {
+		*pp = (*pp)->next;
+		__m_free(&mp0, p, sizeof(*p), "MPOOL");
+	}
+}
+
+/*
+ * Allocate and zero 'size' bytes of DMAable memory for device
+ * 'bush', creating the per-device pool on first use.  If the
+ * allocation fails and leaves the pool empty (!mp->nump), the pool
+ * is destroyed again.  Returns 0 on failure.
+ */
+static void *__m_calloc_dma(m_bush_t bush, int size, char *name)
+{
+	u_long flags;
+	struct m_pool *mp;
+	void *m = 0;
+
+	NCR_LOCK_DRIVER(flags);
+	mp = ___get_dma_pool(bush);
+	if (!mp)
+		mp = ___cre_dma_pool(bush);
+	if (mp)
+		m = __m_calloc(mp, size, name);
+	if (mp && !mp->nump)
+		___del_dma_pool(mp);
+	NCR_UNLOCK_DRIVER(flags);
+
+	return m;
+}
+
+/*
+ * Free DMAable memory obtained through __m_calloc_dma().  The
+ * per-device pool is destroyed once its last cluster is released.
+ */
+static void __m_free_dma(m_bush_t bush, void *m, int size, char *name)
+{
+	u_long flags;
+	struct m_pool *mp;
+
+	NCR_LOCK_DRIVER(flags);
+	mp = ___get_dma_pool(bush);
+	if (mp)
+		__m_free(mp, m, size, name);
+	if (mp && !mp->nump)
+		___del_dma_pool(mp);
+	NCR_UNLOCK_DRIVER(flags);
+}
+
+/*
+ * Translate virtual address 'm', which must lie inside a DMAable
+ * cluster of device 'bush', into the bus address the chip must use.
+ * Returns 0 when no matching cluster is found in the hashed reverse
+ * table.
+ */
+static m_addr_t __vtobus(m_bush_t bush, void *m)
+{
+	u_long flags;
+	m_pool_s *mp;
+	int hc = VTOB_HASH_CODE(m);
+	m_vtob_s *vp = 0;
+	m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;	/* cluster base */
+
+	NCR_LOCK_DRIVER(flags);
+	mp = ___get_dma_pool(bush);
+	if (mp) {
+		vp = mp->vtob[hc];
+		while (vp && (m_addr_t) vp->vaddr != a)
+			vp = vp->next;
+	}
+	NCR_UNLOCK_DRIVER(flags);
+	return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
+}
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+#define _m_calloc_dma(np, s, n) __m_calloc_dma(np->pdev, s, n)
+#define _m_free_dma(np, p, s, n) __m_free_dma(np->pdev, p, s, n)
+#define m_calloc_dma(s, n) _m_calloc_dma(np, s, n)
+#define m_free_dma(p, s, n) _m_free_dma(np, p, s, n)
+#define _vtobus(np, p) __vtobus(np->pdev, p)
+#define vtobus(p) _vtobus(np, p)
+
+/*
+ * Deal with DMA mapping/unmapping.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+/* Linux versions prior to pci bus iommu kernel interface */
+
+#define __unmap_scsi_data(pdev, cmd) do {; } while (0)
+#define __map_scsi_single_data(pdev, cmd) (__vtobus(pdev,(cmd)->request_buffer))
+#define __map_scsi_sg_data(pdev, cmd) ((cmd)->use_sg)
+#define __sync_scsi_data(pdev, cmd) do {; } while (0)
+
+#define scsi_sg_dma_address(sc) vtobus((sc)->address)
+#define scsi_sg_dma_len(sc) ((sc)->length)
+
+#else
+
+/* Linux version with pci bus iommu kernel interface */
+
+/* To keep track of the dma mapping (sg/single) that has been set */
+#define __data_mapped SCp.phase
+#define __data_mapping SCp.have_data_in
+
+/*
+ * Release the DMA mapping (scatterlist or single buffer) recorded in
+ * the command by __map_scsi_sg_data()/__map_scsi_single_data().
+ * Safe to call when nothing is mapped (__data_mapped == 0).
+ */
+static void __unmap_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+	int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+	switch(cmd->__data_mapped) {
+	case 2:
+		pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+		break;
+	case 1:
+		pci_unmap_single(pdev, cmd->__data_mapping,
+				 cmd->request_bufflen, dma_dir);
+		break;
+	}
+	cmd->__data_mapped = 0;
+}
+
+/*
+ * pci_map_single() the command data buffer and remember the mapping
+ * in the command (__data_mapped = 1) for later unmap/sync.  Returns
+ * the bus address, or 0 for a zero-length transfer.
+ */
+static u_long __map_scsi_single_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+	dma_addr_t mapping;
+	int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+	if (cmd->request_bufflen == 0)
+		return 0;
+
+	mapping = pci_map_single(pdev, cmd->request_buffer,
+				 cmd->request_bufflen, dma_dir);
+	cmd->__data_mapped = 1;
+	cmd->__data_mapping = mapping;
+
+	return mapping;
+}
+
+/*
+ * pci_map_sg() the command scatterlist and remember the mapping in
+ * the command (__data_mapped = 2).  Returns the number of mapped sg
+ * entries, or 0 if the command carries no scatterlist.
+ */
+static int __map_scsi_sg_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+	int use_sg;
+	int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+	if (cmd->use_sg == 0)
+		return 0;
+
+	use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+	cmd->__data_mapped = 2;
+	cmd->__data_mapping = use_sg;
+
+	return use_sg;
+}
+
+/*
+ * Synchronize the command's DMA mapping (scatterlist or single
+ * buffer) between CPU and device views.  No-op when nothing is
+ * mapped.
+ */
+static void __sync_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+	int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+	switch(cmd->__data_mapped) {
+	case 2:
+		pci_dma_sync_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+		break;
+	case 1:
+		pci_dma_sync_single(pdev, cmd->__data_mapping,
+				    cmd->request_bufflen, dma_dir);
+		break;
+	}
+}
+
+#define scsi_sg_dma_address(sc) sg_dma_address(sc)
+#define scsi_sg_dma_len(sc) sg_dma_len(sc)
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->pdev, cmd)
+#define map_scsi_single_data(np, cmd) __map_scsi_single_data(np->pdev, cmd)
+#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->pdev, cmd)
+#define sync_scsi_data(np, cmd) __sync_scsi_data(np->pdev, cmd)
+
+/*==========================================================
+**
+** SCSI data transfer direction
+**
+** Until some linux kernel version near 2.3.40,
+** low-level scsi drivers were not told about data
+** transfer direction. We check the existence of this
+** feature that has been expected for a _long_ time by
+** all SCSI driver developers by just testing against
+** the definition of SCSI_DATA_UNKNOWN. Indeed this is
+** a hack, but testing against a kernel version would
+** have been a shame. ;-)
+**
+**==========================================================
+*/
+#ifdef SCSI_DATA_UNKNOWN
+
+#define scsi_data_direction(cmd) (cmd->sc_data_direction)
+
+#else
+
+#define SCSI_DATA_UNKNOWN 0
+#define SCSI_DATA_WRITE 1
+#define SCSI_DATA_READ 2
+#define SCSI_DATA_NONE 3
+
+/*
+ * Fallback for kernels that do not define SCSI_DATA_UNKNOWN: infer
+ * the transfer direction from the SCSI opcode.  Only READ/WRITE
+ * (6/10/12 byte CDBs) are decoded; every other opcode yields
+ * SCSI_DATA_UNKNOWN.
+ */
+static __inline__ int scsi_data_direction(Scsi_Cmnd *cmd)
+{
+	int direction;
+
+	switch((int) cmd->cmnd[0]) {
+	case 0x08: /* READ(6) 08 */
+	case 0x28: /* READ(10) 28 */
+	case 0xA8: /* READ(12) A8 */
+		direction = SCSI_DATA_READ;
+		break;
+	case 0x0A: /* WRITE(6) 0A */
+	case 0x2A: /* WRITE(10) 2A */
+	case 0xAA: /* WRITE(12) AA */
+		direction = SCSI_DATA_WRITE;
+		break;
+	default:
+		direction = SCSI_DATA_UNKNOWN;
+		break;
+	}
+
+	return direction;
+}
+
+#endif /* SCSI_DATA_UNKNOWN */
+
+/*==========================================================
+**
+** Driver setup.
+**
+** This structure is initialized from linux config
+** options. It can be overridden at boot-up by the boot
+** command line.
+**
+**==========================================================
+*/
+static struct ncr_driver_setup
+ driver_setup = SCSI_NCR_DRIVER_SETUP;
+
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+static struct ncr_driver_setup
+ driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
+#endif
+
+#define initverbose (driver_setup.verbose)
+#define bootverbose (np->verbose)
+
+
+/*==========================================================
+**
+** Structures used by the detection routine to transmit
+** device configuration to the attach function.
+**
+**==========================================================
+*/
+typedef struct {
+ int bus;
+ u_char device_fn;
+ u_long base;
+ u_long base_2;
+ u_long io_port;
+ int irq;
+/* port and reg fields to use INB, OUTB macros */
+ u_long base_io;
+ volatile struct ncr_reg *reg;
+} ncr_slot;
+
+/*==========================================================
+**
+** Structure used to store the NVRAM content.
+**
+**==========================================================
+*/
+typedef struct {
+ int type;
+#define SCSI_NCR_SYMBIOS_NVRAM (1)
+#define SCSI_NCR_TEKRAM_NVRAM (2)
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ union {
+ Symbios_nvram Symbios;
+ Tekram_nvram Tekram;
+ } data;
+#endif
+} ncr_nvram;
+
+/*==========================================================
+**
+** Structure used by detection routine to save data on
+** each detected board for attach.
+**
+**==========================================================
+*/
+typedef struct {
+ pcidev_t pdev;
+ ncr_slot slot;
+ ncr_chip chip;
+ ncr_nvram *nvram;
+ u_char host_id;
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ u_char pqs_pds;
+#endif
+ int attach_done;
+} ncr_device;
+
+static int ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device);
+
+/*==========================================================
+**
+** NVRAM detection and reading.
+**
+** Currently supported:
+** - 24C16 EEPROM with both Symbios and Tekram layout.
+** - 93C46 EEPROM with Tekram layout.
+**
+**==========================================================
+*/
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+/*
+ * 24C16 EEPROM reading.
+ *
+ * GPIO0 - data in/data out
+ * GPIO1 - clock
+ * Symbios NVRAM wiring now also used by Tekram.
+ */
+
+#define SET_BIT 0
+#define CLR_BIT 1
+#define SET_CLK 2
+#define CLR_CLK 3
+
+/*
+ * Set/clear data/clock bit in GPIO0
+ */
+static void __init
+S24C16_set_bit(ncr_slot *np, u_char write_bit, u_char *gpreg, int bit_mode)
+{
+ UDELAY (5);
+ switch (bit_mode){
+ case SET_BIT:
+ *gpreg |= write_bit;
+ break;
+ case CLR_BIT:
+ *gpreg &= 0xfe;
+ break;
+ case SET_CLK:
+ *gpreg |= 0x02;
+ break;
+ case CLR_CLK:
+ *gpreg &= 0xfd;
+ break;
+
+ }
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (5);
+}
+
+/*
+ * Send START condition to NVRAM to wake it up.
+ */
+static void __init S24C16_start(ncr_slot *np, u_char *gpreg)
+{
+ S24C16_set_bit(np, 1, gpreg, SET_BIT);
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+ S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
+ */
+static void __init S24C16_stop(ncr_slot *np, u_char *gpreg)
+{
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ S24C16_set_bit(np, 1, gpreg, SET_BIT);
+}
+
+/*
+ * Read or write a bit to the NVRAM,
+ * read if GPIO0 input else write if GPIO0 output
+ */
+static void __init
+S24C16_do_bit(ncr_slot *np, u_char *read_bit, u_char write_bit, u_char *gpreg)
+{
+ S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ if (read_bit)
+ *read_bit = INB (nc_gpreg);
+ S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+ S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+}
+
+/*
+ * Output an ACK to the NVRAM after reading,
+ * change GPIO0 to output and when done back to an input
+ */
+static void __init
+S24C16_write_ack(ncr_slot *np, u_char write_bit, u_char *gpreg, u_char *gpcntl)
+{
+ OUTB (nc_gpcntl, *gpcntl & 0xfe);
+ S24C16_do_bit(np, 0, write_bit, gpreg);
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * Input an ACK from NVRAM after writing,
+ * change GPIO0 to input and when done back to an output
+ */
+static void __init
+S24C16_read_ack(ncr_slot *np, u_char *read_bit, u_char *gpreg, u_char *gpcntl)
+{
+ OUTB (nc_gpcntl, *gpcntl | 0x01);
+ S24C16_do_bit(np, read_bit, 1, gpreg);
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
+ * GPIO0 must already be set as an output
+ */
+static void __init
+S24C16_write_byte(ncr_slot *np, u_char *ack_data, u_char write_data,
+ u_char *gpreg, u_char *gpcntl)
+{
+ int x;
+
+ for (x = 0; x < 8; x++)
+ S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
+
+ S24C16_read_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * READ a byte from the NVRAM and then send an ACK to say we have got it,
+ * GPIO0 must already be set as an input
+ */
+static void __init
+S24C16_read_byte(ncr_slot *np, u_char *read_data, u_char ack_data,
+ u_char *gpreg, u_char *gpcntl)
+{
+ int x;
+ u_char read_bit;
+
+ *read_data = 0;
+ for (x = 0; x < 8; x++) {
+ S24C16_do_bit(np, &read_bit, 1, gpreg);
+ *read_data |= ((read_bit & 0x01) << (7 - x));
+ }
+
+ S24C16_write_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * Read 'len' bytes starting at 'offset'.
+ */
+static int __init
+sym_read_S24C16_nvram (ncr_slot *np, int offset, u_char *data, int len)
+{
+	/* Returns 0 on success, 1 if the EEPROM failed to ACK any of
+	 * the addressing bytes.  GPCNTL/GPREG are restored on exit. */
+	u_char gpcntl, gpreg;
+	u_char old_gpcntl, old_gpreg;
+	u_char ack_data;
+	int retv = 1;
+	int x;
+
+	/* save current state of GPCNTL and GPREG */
+	old_gpreg = INB (nc_gpreg);
+	old_gpcntl = INB (nc_gpcntl);
+	gpcntl = old_gpcntl & 0xfc;
+
+	/* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
+	OUTB (nc_gpreg, old_gpreg);
+	OUTB (nc_gpcntl, gpcntl);
+
+	/* this is to set NVRAM into a known state with GPIO0/1 both low */
+	gpreg = old_gpreg;
+	S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
+	S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
+
+	/* now set NVRAM inactive with GPIO0/1 both high */
+	S24C16_stop(np, &gpreg);
+
+	/* activate NVRAM */
+	S24C16_start(np, &gpreg);
+
+	/* write device code and random address MSB */
+	S24C16_write_byte(np, &ack_data,
+		0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+	if (ack_data & 0x01)
+		goto out;
+
+	/* write random address LSB */
+	S24C16_write_byte(np, &ack_data,
+		offset & 0xff, &gpreg, &gpcntl);
+	if (ack_data & 0x01)
+		goto out;
+
+	/* regenerate START state to set up for reading */
+	S24C16_start(np, &gpreg);
+
+	/* rewrite device code and address MSB with read bit set (lsb = 0x01) */
+	S24C16_write_byte(np, &ack_data,
+		0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+	if (ack_data & 0x01)
+		goto out;
+
+	/* now set up GPIO0 for inputting data */
+	gpcntl |= 0x01;
+	OUTB (nc_gpcntl, gpcntl);
+
+	/* input all requested data - only part of total NVRAM */
+	for (x = 0; x < len; x++)
+		S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);
+
+	/* finally put NVRAM back in inactive mode */
+	gpcntl &= 0xfe;
+	OUTB (nc_gpcntl, gpcntl);
+	S24C16_stop(np, &gpreg);
+	retv = 0;
+out:
+	/* return GPIO0/1 to original states after having accessed NVRAM */
+	OUTB (nc_gpcntl, old_gpcntl);
+	OUTB (nc_gpreg, old_gpreg);
+
+	return retv;
+}
+
+/*
+ * Undefine the bit-bang opcodes now that the 24C16 helpers are done.
+ * Note: #undef takes only the macro name (C standard 6.10.3.5); the
+ * former trailing values were invalid extra tokens.
+ */
+#undef SET_BIT
+#undef CLR_BIT
+#undef SET_CLK
+#undef CLR_CLK
+
+/*
+ * Try reading Symbios NVRAM.
+ * Return 0 if OK.
+ */
+static int __init sym_read_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram)
+{
+	/* Returns 0 when a valid Symbios layout was read, 1 otherwise. */
+	static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
+	u_char *data = (u_char *) nvram;
+	int len = sizeof(*nvram);
+	u_short csum;
+	int x;
+
+	/* probe the 24c16 and read the SYMBIOS 24c16 area */
+	if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
+		return 1;
+
+	/* check valid NVRAM signature, verify byte count and checksum */
+	if (nvram->type != 0 ||
+	    memcmp(nvram->trailer, Symbios_trailer, 6) ||
+	    nvram->byte_count != len - 12)
+		return 1;
+
+	/* verify checksum: sum of all bytes between header and trailer */
+	for (x = 6, csum = 0; x < len - 6; x++)
+		csum += data[x];
+	if (csum != nvram->checksum)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * 93C46 EEPROM reading.
+ *
+ * GPIO0 - data in
+ * GPIO1 - data out
+ * GPIO2 - clock
+ * GPIO4 - chip select
+ *
+ * Used by Tekram.
+ */
+
+/*
+ * Pulse clock bit in GPIO0
+ */
+static void __init T93C46_Clk(ncr_slot *np, u_char *gpreg)
+{
+ OUTB (nc_gpreg, *gpreg | 0x04);
+ UDELAY (2);
+ OUTB (nc_gpreg, *gpreg);
+}
+
+/*
+ * Read bit from NVRAM
+ */
+static void __init T93C46_Read_Bit(ncr_slot *np, u_char *read_bit, u_char *gpreg)
+{
+ UDELAY (2);
+ T93C46_Clk(np, gpreg);
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * Write bit to GPIO0
+ */
+static void __init T93C46_Write_Bit(ncr_slot *np, u_char write_bit, u_char *gpreg)
+{
+ if (write_bit & 0x01)
+ *gpreg |= 0x02;
+ else
+ *gpreg &= 0xfd;
+
+ *gpreg |= 0x10;
+
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (2);
+
+ T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
+ */
+static void __init T93C46_Stop(ncr_slot *np, u_char *gpreg)
+{
+ *gpreg &= 0xef;
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (2);
+
+ T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send read command and address to NVRAM
+ */
+static void __init
+T93C46_Send_Command(ncr_slot *np, u_short write_data,
+ u_char *read_bit, u_char *gpreg)
+{
+ int x;
+
+ /* send 9 bits, start bit (1), command (2), address (6) */
+ for (x = 0; x < 9; x++)
+ T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
+
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * READ 2 bytes from the NVRAM
+ */
+static void __init
+T93C46_Read_Word(ncr_slot *np, u_short *nvram_data, u_char *gpreg)
+{
+ int x;
+ u_char read_bit;
+
+ *nvram_data = 0;
+ for (x = 0; x < 16; x++) {
+ T93C46_Read_Bit(np, &read_bit, gpreg);
+
+ if (read_bit & 0x01)
+ *nvram_data |= (0x01 << (15 - x));
+ else
+ *nvram_data &= ~(0x01 << (15 - x));
+ }
+}
+
+/*
+ * Read Tekram NvRAM data.
+ */
+static int __init
+T93C46_Read_Data(ncr_slot *np, u_short *data,int len,u_char *gpreg)
+{
+ u_char read_bit;
+ int x;
+
+ for (x = 0; x < len; x++) {
+
+ /* output read command and address */
+ T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
+ if (read_bit & 0x01)
+ return 1; /* Bad */
+ T93C46_Read_Word(np, &data[x], gpreg);
+ T93C46_Stop(np, gpreg);
+ }
+
+ return 0;
+}
+
+/*
+ * Try reading 93C46 Tekram NVRAM.
+ */
+static int __init
+sym_read_T93C46_nvram (ncr_slot *np, Tekram_nvram *nvram)
+{
+	/* Returns 0 on success (all 64 words read), non-zero otherwise. */
+	u_char gpcntl, gpreg;
+	u_char old_gpcntl, old_gpreg;
+	int retv = 1;
+
+	/* save current state of GPCNTL and GPREG */
+	old_gpreg = INB (nc_gpreg);
+	old_gpcntl = INB (nc_gpcntl);
+
+	/* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in,
+	   1/2/4 out */
+	gpreg = old_gpreg & 0xe9;
+	OUTB (nc_gpreg, gpreg);
+	gpcntl = (old_gpcntl & 0xe9) | 0x09;
+	OUTB (nc_gpcntl, gpcntl);
+
+	/* input all of NVRAM, 64 words */
+	retv = T93C46_Read_Data(np, (u_short *) nvram,
+				sizeof(*nvram) / sizeof(short), &gpreg);
+
+	/* return GPIO0/1/2/4 to original states after having accessed NVRAM */
+	OUTB (nc_gpcntl, old_gpcntl);
+	OUTB (nc_gpreg, old_gpreg);
+
+	return retv;
+}
+
+/*
+ * Try reading Tekram NVRAM.
+ * Return 0 if OK.
+ */
+static int __init
+sym_read_Tekram_nvram (ncr_slot *np, u_short device_id, Tekram_nvram *nvram)
+{
+	/* Returns 0 when a Tekram layout with valid checksum was read.
+	 * The EEPROM type (24C16 vs 93C46) is chosen by chip device id. */
+	u_char *data = (u_char *) nvram;
+	int len = sizeof(*nvram);
+	u_short csum;
+	int x;
+
+	switch (device_id) {
+	case PCI_DEVICE_ID_NCR_53C885:
+	case PCI_DEVICE_ID_NCR_53C895:
+	case PCI_DEVICE_ID_NCR_53C896:
+		x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+					  data, len);
+		break;
+	case PCI_DEVICE_ID_NCR_53C875:
+		x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+					  data, len);
+		if (!x)
+			break;
+		/* fall through: 53C875 may use a 93C46 instead */
+	default:
+		x = sym_read_T93C46_nvram(np, nvram);
+		break;
+	}
+	if (x)
+		return 1;
+
+	/* verify checksum: 16-bit little-endian word sum must be 0x1234 */
+	for (x = 0, csum = 0; x < len - 1; x += 2)
+		csum += data[x] + (data[x+1] << 8);
+	if (csum != 0x1234)
+		return 1;
+
+	return 0;
+}
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*===================================================================
+**
+** Detect and try to read SYMBIOS and TEKRAM NVRAM.
+**
+** Data can be used to order booting of boards.
+**
+** Data is saved in ncr_device structure if NVRAM found. This
+** is then used to find drive boot order for ncr_attach().
+**
+** NVRAM data is passed to Scsi_Host_Template later during
+** ncr_attach() for any device set up.
+**
+**===================================================================
+*/
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+/*
+ * Probe and read the NVRAM of one detected board.  On success
+ * devp->nvram points to 'nvp' with nvp->type set to the detected
+ * layout (Symbios or Tekram); otherwise devp->nvram is cleared so
+ * callers never see an nvram buffer whose 'type' was never written.
+ */
+static void __init ncr_get_nvram(ncr_device *devp, ncr_nvram *nvp)
+{
+	devp->nvram = nvp;
+	if (!nvp)
+		return;
+	/*
+	** Get access to chip IO registers
+	*/
+#ifdef SCSI_NCR_IOMAPPED
+	request_region(devp->slot.io_port, 128, NAME53C8XX);
+	devp->slot.base_io = devp->slot.io_port;
+#else
+	devp->slot.reg = (struct ncr_reg *) remap_pci_mem(devp->slot.base, 128);
+	if (!devp->slot.reg) {
+		/*
+		** Mapping failed: report 'no NVRAM' instead of leaving
+		** devp->nvram pointing to an uninitialized buffer.
+		*/
+		devp->nvram = 0;
+		return;
+	}
+#endif
+
+	/*
+	** Try to read SYMBIOS nvram.
+	** Try to read TEKRAM nvram if Symbios nvram not found.
+	*/
+	if	(!sym_read_Symbios_nvram(&devp->slot, &nvp->data.Symbios))
+		nvp->type = SCSI_NCR_SYMBIOS_NVRAM;
+	else if	(!sym_read_Tekram_nvram(&devp->slot, devp->chip.device_id,
+					&nvp->data.Tekram))
+		nvp->type = SCSI_NCR_TEKRAM_NVRAM;
+	else {
+		nvp->type = 0;
+		devp->nvram = 0;
+	}
+
+	/*
+	** Release access to chip IO registers
+	*/
+#ifdef SCSI_NCR_IOMAPPED
+	release_region(devp->slot.base_io, 128);
+#else
+	unmap_pci_mem((u_long) devp->slot.reg, 128ul);
+#endif
+
+}
+
+/*===================================================================
+**
+** Display the content of NVRAM for debugging purpose.
+**
+**===================================================================
+*/
+#ifdef SCSI_NCR_DEBUG_NVRAM
+static void __init ncr_display_Symbios_nvram(Symbios_nvram *nvram)
+{
+ int i;
+
+ /* display Symbios nvram host data */
+ printk(KERN_DEBUG NAME53C8XX ": HOST ID=%d%s%s%s%s%s\n",
+ nvram->host_id & 0x0f,
+ (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"",
+ (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"",
+ (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"",
+ (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :"");
+
+ /* display Symbios nvram drive data */
+ for (i = 0 ; i < 15 ; i++) {
+ struct Symbios_target *tn = &nvram->target[i];
+ printk(KERN_DEBUG NAME53C8XX
+ "-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
+ i,
+ (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
+ (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
+ (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
+ tn->bus_width,
+ tn->sync_period / 4,
+ tn->timeout);
+ }
+}
+
+static u_char Tekram_boot_delay[7] __initdata = {3, 5, 10, 20, 30, 60, 120};
+
+static void __init ncr_display_Tekram_nvram(Tekram_nvram *nvram)
+{
+ int i, tags, boot_delay;
+ char *rem;
+
+ /* display Tekram nvram host data */
+ tags = 2 << nvram->max_tags_index;
+ boot_delay = 0;
+ if (nvram->boot_delay_index < 6)
+ boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
+ switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
+ default:
+ case 0: rem = ""; break;
+ case 1: rem = " REMOVABLE=boot device"; break;
+ case 2: rem = " REMOVABLE=all"; break;
+ }
+
+ printk(KERN_DEBUG NAME53C8XX
+ ": HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
+ nvram->host_id & 0x0f,
+ (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES":"",
+ (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"",
+ (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"",
+ (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"",
+ (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"",
+ (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"",
+ (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"",
+ rem, boot_delay, tags);
+
+ /* display Tekram nvram drive data */
+ for (i = 0; i <= 15; i++) {
+ int sync, j;
+ struct Tekram_target *tn = &nvram->target[i];
+ j = tn->sync_index & 0xf;
+ sync = Tekram_sync[j];
+ printk(KERN_DEBUG NAME53C8XX "-%d:%s%s%s%s%s%s PERIOD=%d\n",
+ i,
+ (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
+ (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
+ (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & TEKRAM_START_CMD) ? " START" : "",
+ (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
+ (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
+ sync);
+ }
+}
+#endif /* SCSI_NCR_DEBUG_NVRAM */
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+
+/*===================================================================
+**
+** Utility routines that properly return data through /proc FS.
+**
+**===================================================================
+*/
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+
+struct info_str
+{
+ char *buffer;
+ int length;
+ int offset;
+ int pos;
+};
+
+/*
+ * Copy 'len' bytes of 'data' into the /proc output window described
+ * by 'info': bytes before info->offset are skipped, bytes beyond
+ * info->length are dropped, and info->pos tracks the logical output
+ * position across calls.
+ */
+static void copy_mem_info(struct info_str *info, char *data, int len)
+{
+	if (info->pos + len > info->length)
+		len = info->length - info->pos;
+
+	if (info->pos + len < info->offset) {
+		info->pos += len;
+		return;
+	}
+	if (info->pos < info->offset) {
+		/* Skip the part of 'data' that lies before the window. */
+		data += (info->offset - info->pos);
+		len -= (info->offset - info->pos);
+	}
+
+	if (len > 0) {
+		memcpy(info->buffer + info->pos, data, len);
+		info->pos += len;
+	}
+}
+
+/*
+ * printf-like front end to copy_mem_info().  Returns the formatted
+ * length.
+ * NOTE(review): vsprintf writes into buf[81] unchecked — callers
+ * must keep each formatted line under 81 characters; confirm.
+ */
+static int copy_info(struct info_str *info, char *fmt, ...)
+{
+	va_list args;
+	char buf[81];
+	int len;
+
+	va_start(args, fmt);
+	len = vsprintf(buf, fmt, args);
+	va_end(args);
+
+	copy_mem_info(info, buf, len);
+	return len;
+}
+
+#endif
+
+/*===================================================================
+**
+** Driver setup from the boot command line
+**
+**===================================================================
+*/
+
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+#define OPT_TAGS 1
+#define OPT_MASTER_PARITY 2
+#define OPT_SCSI_PARITY 3
+#define OPT_DISCONNECTION 4
+#define OPT_SPECIAL_FEATURES 5
+#define OPT_ULTRA_SCSI 6
+#define OPT_FORCE_SYNC_NEGO 7
+#define OPT_REVERSE_PROBE 8
+#define OPT_DEFAULT_SYNC 9
+#define OPT_VERBOSE 10
+#define OPT_DEBUG 11
+#define OPT_BURST_MAX 12
+#define OPT_LED_PIN 13
+#define OPT_MAX_WIDE 14
+#define OPT_SETTLE_DELAY 15
+#define OPT_DIFF_SUPPORT 16
+#define OPT_IRQM 17
+#define OPT_PCI_FIX_UP 18
+#define OPT_BUS_CHECK 19
+#define OPT_OPTIMIZE 20
+#define OPT_RECOVERY 21
+#define OPT_SAFE_SETUP 22
+#define OPT_USE_NVRAM 23
+#define OPT_EXCLUDE 24
+#define OPT_HOST_ID 25
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+#define OPT_IARB 26
+#endif
+
+static char setup_token[] __initdata =
+ "tags:" "mpar:"
+ "spar:" "disc:"
+ "specf:" "ultra:"
+ "fsn:" "revprob:"
+ "sync:" "verb:"
+ "debug:" "burst:"
+ "led:" "wide:"
+ "settle:" "diff:"
+ "irqm:" "pcifix:"
+ "buschk:" "optim:"
+ "recovery:"
+ "safe:" "nvram:"
+ "excl:" "hostid:"
+#ifdef SCSI_NCR_IARB_SUPPORT
+ "iarb:"
+#endif
+	; /* DO NOT REMOVE THIS ';' */
+
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+static int __init get_setup_token(char *p)
+{
+ char *cur = setup_token;
+ char *pc;
+ int i = 0;
+
+ while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+ ++pc;
+ ++i;
+ if (!strncmp(p, cur, pc - cur))
+ return i;
+ cur = pc;
+ }
+ return 0;
+}
+
+
+static int __init sym53c8xx__setup(char *str)
+{
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+ char *cur = str;
+ char *pc, *pv;
+ int i, val, c;
+ int xi = 0;
+
+ while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+ char *pe;
+
+ val = 0;
+ pv = pc;
+ c = *++pv;
+
+ if (c == 'n')
+ val = 0;
+ else if (c == 'y')
+ val = 1;
+ else
+ val = (int) simple_strtoul(pv, &pe, 0);
+
+ switch (get_setup_token(cur)) {
+ case OPT_TAGS:
+ driver_setup.default_tags = val;
+ if (pe && *pe == '/') {
+ i = 0;
+ while (*pe && *pe != ARG_SEP &&
+ i < sizeof(driver_setup.tag_ctrl)-1) {
+ driver_setup.tag_ctrl[i++] = *pe++;
+ }
+ driver_setup.tag_ctrl[i] = '\0';
+ }
+ break;
+ case OPT_MASTER_PARITY:
+ driver_setup.master_parity = val;
+ break;
+ case OPT_SCSI_PARITY:
+ driver_setup.scsi_parity = val;
+ break;
+ case OPT_DISCONNECTION:
+ driver_setup.disconnection = val;
+ break;
+ case OPT_SPECIAL_FEATURES:
+ driver_setup.special_features = val;
+ break;
+ case OPT_ULTRA_SCSI:
+ driver_setup.ultra_scsi = val;
+ break;
+ case OPT_FORCE_SYNC_NEGO:
+ driver_setup.force_sync_nego = val;
+ break;
+ case OPT_REVERSE_PROBE:
+ driver_setup.reverse_probe = val;
+ break;
+ case OPT_DEFAULT_SYNC:
+ driver_setup.default_sync = val;
+ break;
+ case OPT_VERBOSE:
+ driver_setup.verbose = val;
+ break;
+ case OPT_DEBUG:
+ driver_setup.debug = val;
+ break;
+ case OPT_BURST_MAX:
+ driver_setup.burst_max = val;
+ break;
+ case OPT_LED_PIN:
+ driver_setup.led_pin = val;
+ break;
+ case OPT_MAX_WIDE:
+ driver_setup.max_wide = val? 1:0;
+ break;
+ case OPT_SETTLE_DELAY:
+ driver_setup.settle_delay = val;
+ break;
+ case OPT_DIFF_SUPPORT:
+ driver_setup.diff_support = val;
+ break;
+ case OPT_IRQM:
+ driver_setup.irqm = val;
+ break;
+ case OPT_PCI_FIX_UP:
+ driver_setup.pci_fix_up = val;
+ break;
+ case OPT_BUS_CHECK:
+ driver_setup.bus_check = val;
+ break;
+ case OPT_OPTIMIZE:
+ driver_setup.optimize = val;
+ break;
+ case OPT_RECOVERY:
+ driver_setup.recovery = val;
+ break;
+ case OPT_USE_NVRAM:
+ driver_setup.use_nvram = val;
+ break;
+ case OPT_SAFE_SETUP:
+ memcpy(&driver_setup, &driver_safe_setup,
+ sizeof(driver_setup));
+ break;
+ case OPT_EXCLUDE:
+ if (xi < SCSI_NCR_MAX_EXCLUDES)
+ driver_setup.excludes[xi++] = val;
+ break;
+ case OPT_HOST_ID:
+ driver_setup.host_id = val;
+ break;
+#ifdef SCSI_NCR_IARB_SUPPORT
+ case OPT_IARB:
+ driver_setup.iarb = val;
+ break;
+#endif
+ default:
+ printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
+ break;
+ }
+
+ if ((cur = strchr(cur, ARG_SEP)) != NULL)
+ ++cur;
+ }
+#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
+ return 1;
+}
+
+/*===================================================================
+**
+** Get device queue depth from boot command line.
+**
+**===================================================================
+*/
+#define DEF_DEPTH (driver_setup.default_tags)
+#define ALL_TARGETS -2
+#define NO_TARGET -1
+#define ALL_LUNS -2
+#define NO_LUN -1
+
+static int device_queue_depth(int unit, int target, int lun)
+{
+ int c, h, t, u, v;
+ char *p = driver_setup.tag_ctrl;
+ char *ep;
+
+ h = -1;
+ t = NO_TARGET;
+ u = NO_LUN;
+ while ((c = *p++) != 0) {
+ v = simple_strtoul(p, &ep, 0);
+ switch(c) {
+ case '/':
+ ++h;
+ t = ALL_TARGETS;
+ u = ALL_LUNS;
+ break;
+ case 't':
+ if (t != target)
+ t = (target == v) ? v : NO_TARGET;
+ u = ALL_LUNS;
+ break;
+ case 'u':
+ if (u != lun)
+ u = (lun == v) ? v : NO_LUN;
+ break;
+ case 'q':
+ if (h == unit &&
+ (t == ALL_TARGETS || t == target) &&
+ (u == ALL_LUNS || u == lun))
+ return v;
+ break;
+ case '-':
+ t = ALL_TARGETS;
+ u = ALL_LUNS;
+ break;
+ default:
+ break;
+ }
+ p = ep;
+ }
+ return DEF_DEPTH;
+}
+
+/*===================================================================
+**
+** Print out information about driver configuration.
+**
+**===================================================================
+*/
+static void __init ncr_print_driver_setup(void)
+{
+#define YesNo(y) y ? 'y' : 'n'
+ printk (NAME53C8XX ": setup=disc:%c,specf:%d,ultra:%d,tags:%d,sync:%d,"
+ "burst:%d,wide:%c,diff:%d,revprob:%c,buschk:0x%x\n",
+ YesNo(driver_setup.disconnection),
+ driver_setup.special_features,
+ driver_setup.ultra_scsi,
+ driver_setup.default_tags,
+ driver_setup.default_sync,
+ driver_setup.burst_max,
+ YesNo(driver_setup.max_wide),
+ driver_setup.diff_support,
+ YesNo(driver_setup.reverse_probe),
+ driver_setup.bus_check);
+
+ printk (NAME53C8XX ": setup=mpar:%c,spar:%c,fsn=%c,verb:%d,debug:0x%x,"
+ "led:%c,settle:%d,irqm:0x%x,nvram:0x%x,pcifix:0x%x\n",
+ YesNo(driver_setup.master_parity),
+ YesNo(driver_setup.scsi_parity),
+ YesNo(driver_setup.force_sync_nego),
+ driver_setup.verbose,
+ driver_setup.debug,
+ YesNo(driver_setup.led_pin),
+ driver_setup.settle_delay,
+ driver_setup.irqm,
+ driver_setup.use_nvram,
+ driver_setup.pci_fix_up);
+#undef YesNo
+}
+
+/*===================================================================
+**
+** SYM53C8XX devices description table.
+**
+**===================================================================
+*/
+
+static ncr_chip ncr_chip_table[] __initdata = SCSI_NCR_CHIP_TABLE;
+
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+/*===================================================================
+**
+** Detect all NCR PQS/PDS boards and keep track of their bus nr.
+**
+** The NCR PQS or PDS card is constructed as a DEC bridge
+** behind which sit a proprietary NCR memory controller and
+** four or two 53c875s as separate devices. In its usual mode
+** of operation, the 875s are slaved to the memory controller
+** for all transfers. We can tell if an 875 is part of a
+** PQS/PDS or not since if it is, it will be on the same bus
+** as the memory controller. To operate with the Linux
+** driver, the memory controller is disabled and the 875s
+** freed to function independently. The only wrinkle is that
+** the preset SCSI ID (which may be zero) must be read in from
+** a special configuration space register of the 875.
+**
+**===================================================================
+*/
+#define SCSI_NCR_MAX_PQS_BUS 16
+static int pqs_bus[SCSI_NCR_MAX_PQS_BUS] __initdata = { 0 };
+
+static void __init ncr_detect_pqs_pds(void)
+{
+ short index;
+ pcidev_t dev = PCIDEV_NULL;
+
+ for(index=0; index < SCSI_NCR_MAX_PQS_BUS; index++) {
+ u_char tmp;
+
+ dev = pci_find_device(0x101a, 0x0009, dev);
+ if (dev == PCIDEV_NULL) {
+ pqs_bus[index] = -1;
+ break;
+ }
+ printk(KERN_INFO NAME53C8XX ": NCR PQS/PDS memory controller detected on bus %d\n", PciBusNumber(dev));
+ pci_read_config_byte(dev, 0x44, &tmp);
+ /* bit 1: allow individual 875 configuration */
+ tmp |= 0x2;
+ pci_write_config_byte(dev, 0x44, tmp);
+ pci_read_config_byte(dev, 0x45, &tmp);
+ /* bit 2: drive individual 875 interrupts to the bus */
+ tmp |= 0x4;
+ pci_write_config_byte(dev, 0x45, tmp);
+
+ pqs_bus[index] = PciBusNumber(dev);
+ }
+}
+#endif /* SCSI_NCR_PQS_PDS_SUPPORT */
+
+/*===================================================================
+**
+** Read and check the PCI configuration for any detected NCR
+** boards and save data for attaching after all boards have
+** been detected.
+**
+**===================================================================
+*/
+static int __init
+sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, ncr_device *device)
+{
+ u_short vendor_id, device_id, command;
+ u_char cache_line_size, latency_timer;
+ u_char suggested_cache_line_size = 0;
+ u_char pci_fix_up = driver_setup.pci_fix_up;
+ u_char revision;
+ u_int irq;
+ u_long base, base_2, io_port;
+ int i;
+ ncr_chip *chip;
+
+ printk(KERN_INFO NAME53C8XX ": at PCI bus %d, device %d, function %d\n",
+ PciBusNumber(pdev),
+ (int) (PciDeviceFn(pdev) & 0xf8) >> 3,
+ (int) (PciDeviceFn(pdev) & 7));
+
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+ if (!pci_dma_supported(pdev, (dma_addr_t) (0xffffffffUL))) {
+ printk(KERN_WARNING NAME53C8XX
+ "32 BIT PCI BUS DMA ADDRESSING NOT SUPPORTED\n");
+ return -1;
+ }
+#endif
+
+ /*
+ ** Read info from the PCI config space.
+ ** pci_read_config_xxx() functions are assumed to be used for
+ ** successfully detected PCI devices.
+ */
+ vendor_id = PciVendorId(pdev);
+ device_id = PciDeviceId(pdev);
+ irq = PciIrqLine(pdev);
+ i = 0;
+ i = pci_get_base_address(pdev, i, &io_port);
+ i = pci_get_base_address(pdev, i, &base);
+ (void) pci_get_base_address(pdev, i, &base_2);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &command);
+ pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
+
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ /*
+ ** Match the BUS number for PQS/PDS devices.
+ ** Read the SCSI ID from a special register mapped
+ ** into the configuration space of the individual
+ ** 875s. This register is set up by the PQS bios
+ */
+ for(i = 0; i < SCSI_NCR_MAX_PQS_BUS && pqs_bus[i] != -1; i++) {
+ u_char tmp;
+ if (pqs_bus[i] == PciBusNumber(pdev)) {
+ pci_read_config_byte(pdev, 0x84, &tmp);
+ device->pqs_pds = 1;
+ device->host_id = tmp;
+ break;
+ }
+ }
+#endif /* SCSI_NCR_PQS_PDS_SUPPORT */
+
+ /*
+	** If user excludes this chip, do not initialize it.
+ */
+ for (i = 0 ; i < SCSI_NCR_MAX_EXCLUDES ; i++) {
+ if (driver_setup.excludes[i] ==
+ (io_port & PCI_BASE_ADDRESS_IO_MASK))
+ return -1;
+ }
+ /*
+ ** Check if the chip is supported
+ */
+ if ((device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (device_id == PCI_DEVICE_ID_LSI_53C1010_66)){
+ printk(NAME53C8XX ": not initializing, device not supported\n");
+ return -1;
+ }
+ chip = 0;
+ for (i = 0; i < sizeof(ncr_chip_table)/sizeof(ncr_chip_table[0]); i++) {
+ if (device_id != ncr_chip_table[i].device_id)
+ continue;
+ if (revision > ncr_chip_table[i].revision_id)
+ continue;
+ chip = &device->chip;
+ memcpy(chip, &ncr_chip_table[i], sizeof(*chip));
+ chip->revision_id = revision;
+ break;
+ }
+
+ /*
+ ** Ignore Symbios chips controlled by SISL RAID controller.
+ ** This controller sets value 0x52414944 at RAM end - 16.
+ */
+#if defined(__i386__) && !defined(SCSI_NCR_PCI_MEM_NOT_SUPPORTED)
+ if (chip && (base_2 & PCI_BASE_ADDRESS_MEM_MASK)) {
+ unsigned int ram_size, ram_val;
+ u_long ram_ptr;
+
+ if (chip->features & FE_RAM8K)
+ ram_size = 8192;
+ else
+ ram_size = 4096;
+
+ ram_ptr = remap_pci_mem(base_2 & PCI_BASE_ADDRESS_MEM_MASK,
+ ram_size);
+ if (ram_ptr) {
+ ram_val = readl_raw(ram_ptr + ram_size - 16);
+ unmap_pci_mem(ram_ptr, ram_size);
+ if (ram_val == 0x52414944) {
+ printk(NAME53C8XX": not initializing, "
+ "driven by SISL RAID controller.\n");
+ return -1;
+ }
+ }
+ }
+#endif /* i386 and PCI MEMORY accessible */
+
+ if (!chip) {
+ printk(NAME53C8XX ": not initializing, device not supported\n");
+ return -1;
+ }
+
+#ifdef __powerpc__
+ /*
+ ** Fix-up for power/pc.
+ ** Should not be performed by the driver.
+ */
+ if ((command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+ != (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+ printk(NAME53C8XX ": setting%s%s...\n",
+ (command & PCI_COMMAND_IO) ? "" : " PCI_COMMAND_IO",
+ (command & PCI_COMMAND_MEMORY) ? "" : " PCI_COMMAND_MEMORY");
+ command |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,2,0)
+ if ( is_prep ) {
+ if (io_port >= 0x10000000) {
+ printk(NAME53C8XX ": reallocating io_port (Wacky IBM)");
+ io_port = (io_port & 0x00FFFFFF) | 0x01000000;
+ pci_write_config_dword(pdev,
+ PCI_BASE_ADDRESS_0, io_port);
+ }
+ if (base >= 0x10000000) {
+ printk(NAME53C8XX ": reallocating base (Wacky IBM)");
+ base = (base & 0x00FFFFFF) | 0x01000000;
+ pci_write_config_dword(pdev,
+ PCI_BASE_ADDRESS_1, base);
+ }
+ if (base_2 >= 0x10000000) {
+ printk(NAME53C8XX ": reallocating base2 (Wacky IBM)");
+ base_2 = (base_2 & 0x00FFFFFF) | 0x01000000;
+ pci_write_config_dword(pdev,
+ PCI_BASE_ADDRESS_2, base_2);
+ }
+ }
+#endif
+#endif /* __powerpc__ */
+
+#if defined(__sparc__) && (LINUX_VERSION_CODE < LinuxVersionCode(2,3,0))
+ /*
+	 * Several fix-ups for sparc.
+ *
+ * Should not be performed by the driver, which is why all
+ * this crap is cleaned up in 2.4.x
+ */
+
+ base = __pa(base);
+ base_2 = __pa(base_2);
+
+ if (!(command & PCI_COMMAND_MASTER)) {
+ if (initverbose >= 2)
+ printk("ncr53c8xx: setting PCI_COMMAND_MASTER bit (fixup)\n");
+ command |= PCI_COMMAND_MASTER;
+ pcibios_write_config_word(bus, device_fn, PCI_COMMAND, command);
+ pcibios_read_config_word(bus, device_fn, PCI_COMMAND, &command);
+ }
+
+ if ((chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+ if (initverbose >= 2)
+ printk("ncr53c8xx: setting PCI_COMMAND_INVALIDATE bit (fixup)\n");
+ command |= PCI_COMMAND_INVALIDATE;
+ pcibios_write_config_word(bus, device_fn, PCI_COMMAND, command);
+ pcibios_read_config_word(bus, device_fn, PCI_COMMAND, &command);
+ }
+
+ if ((chip->features & FE_CLSE) && !cache_line_size) {
+ /* PCI_CACHE_LINE_SIZE value is in 32-bit words. */
+ cache_line_size = 64 / sizeof(u_int32);
+ if (initverbose >= 2)
+ printk("ncr53c8xx: setting PCI_CACHE_LINE_SIZE to %d (fixup)\n",
+ cache_line_size);
+ pcibios_write_config_byte(bus, device_fn,
+ PCI_CACHE_LINE_SIZE, cache_line_size);
+ pcibios_read_config_byte(bus, device_fn,
+ PCI_CACHE_LINE_SIZE, &cache_line_size);
+ }
+
+ if (!latency_timer) {
+ unsigned char min_gnt;
+
+ pcibios_read_config_byte(bus, device_fn,
+ PCI_MIN_GNT, &min_gnt);
+ if (min_gnt == 0)
+ latency_timer = 128;
+ else
+ latency_timer = ((min_gnt << 3) & 0xff);
+ printk("ncr53c8xx: setting PCI_LATENCY_TIMER to %d bus clocks (fixup)\n", latency_timer);
+ pcibios_write_config_byte(bus, device_fn,
+ PCI_LATENCY_TIMER, latency_timer);
+ pcibios_read_config_byte(bus, device_fn,
+ PCI_LATENCY_TIMER, &latency_timer);
+ }
+#endif /* __sparc__ && (LINUX_VERSION_CODE < LinuxVersionCode(2,3,0)) */
+
+#if defined(__i386__) && !defined(MODULE)
+ if (!cache_line_size) {
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,75)
+ extern char x86;
+ switch(x86) {
+#else
+ switch(boot_cpu_data.x86) {
+#endif
+ case 4: suggested_cache_line_size = 4; break;
+ case 6:
+ case 5: suggested_cache_line_size = 8; break;
+ }
+ }
+#endif /* __i386__ */
+
+ /*
+ ** Check availability of IO space, memory space.
+ ** Enable master capability if not yet.
+ **
+ ** We shouldn't have to care about the IO region when
+ ** we are using MMIO. But calling check_region() from
+ ** both the ncr53c8xx and the sym53c8xx drivers prevents
+ ** from attaching devices from the both drivers.
+ ** If you have a better idea, let me know.
+ */
+/* #ifdef SCSI_NCR_IOMAPPED */
+#if 1
+ if (!(command & PCI_COMMAND_IO)) {
+ printk(NAME53C8XX ": I/O base address (0x%lx) disabled.\n",
+ (long) io_port);
+ io_port = 0;
+ }
+#endif
+ if (!(command & PCI_COMMAND_MEMORY)) {
+ printk(NAME53C8XX ": PCI_COMMAND_MEMORY not set.\n");
+ base = 0;
+ base_2 = 0;
+ }
+ io_port &= PCI_BASE_ADDRESS_IO_MASK;
+ base &= PCI_BASE_ADDRESS_MEM_MASK;
+ base_2 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+/* #ifdef SCSI_NCR_IOMAPPED */
+#if 1
+ if (io_port && check_region (io_port, 128)) {
+ printk(NAME53C8XX ": IO region 0x%lx[0..127] is in use\n",
+ (long) io_port);
+ io_port = 0;
+ }
+ if (!io_port)
+ return -1;
+#endif
+#ifndef SCSI_NCR_IOMAPPED
+ if (!base) {
+ printk(NAME53C8XX ": MMIO base address disabled.\n");
+ return -1;
+ }
+#endif
+
+/* The ncr53c8xx driver never did set the PCI parity bit. */
+/* Since setting this bit is known to trigger spurious MDPE */
+/* errors on some 895 controllers when noise on power lines is */
+/* too high, I do not want to change previous ncr53c8xx driver */
+/* behaviour on that point (the sym53c8xx driver set this bit). */
+#if 0
+ /*
+ ** Set MASTER capable and PARITY bit, if not yet.
+ */
+ if ((command & (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY))
+ != (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY)) {
+ printk(NAME53C8XX ": setting%s%s...(fix-up)\n",
+ (command & PCI_COMMAND_MASTER) ? "" : " PCI_COMMAND_MASTER",
+ (command & PCI_COMMAND_PARITY) ? "" : " PCI_COMMAND_PARITY");
+ command |= (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY);
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+#else
+ /*
+ ** Set MASTER capable if not yet.
+ */
+ if ((command & PCI_COMMAND_MASTER) != PCI_COMMAND_MASTER) {
+ printk(NAME53C8XX ": setting PCI_COMMAND_MASTER...(fix-up)\n");
+ command |= PCI_COMMAND_MASTER;
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+#endif
+
+ /*
+ ** Fix some features according to driver setup.
+ */
+ if (!(driver_setup.special_features & 1))
+ chip->features &= ~FE_SPECIAL_SET;
+ else {
+ if (driver_setup.special_features & 2)
+ chip->features &= ~FE_WRIE;
+ if (driver_setup.special_features & 4)
+ chip->features &= ~FE_NOPM;
+ }
+ if (driver_setup.ultra_scsi < 2 && (chip->features & FE_ULTRA2)) {
+ chip->features |= FE_ULTRA;
+ chip->features &= ~FE_ULTRA2;
+ }
+ if (driver_setup.ultra_scsi < 1)
+ chip->features &= ~FE_ULTRA;
+ if (!driver_setup.max_wide)
+ chip->features &= ~FE_WIDE;
+
+ /*
+ ** Some features are required to be enabled in order to
+ ** work around some chip problems. :) ;)
+ ** (ITEM 12 of a DEL about the 896 I haven't yet).
+ ** We must ensure the chip will use WRITE AND INVALIDATE.
+ ** The revision number limit is for now arbitrary.
+ */
+ if (device_id == PCI_DEVICE_ID_NCR_53C896 && revision <= 0x10) {
+ chip->features |= (FE_WRIE | FE_CLSE);
+ pci_fix_up |= 3; /* Force appropriate PCI fix-up */
+ }
+
+#ifdef SCSI_NCR_PCI_FIX_UP_SUPPORT
+ /*
+ ** Try to fix up PCI config according to wished features.
+ */
+ if ((pci_fix_up & 1) && (chip->features & FE_CLSE) &&
+ !cache_line_size && suggested_cache_line_size) {
+ cache_line_size = suggested_cache_line_size;
+ pci_write_config_byte(pdev,
+ PCI_CACHE_LINE_SIZE, cache_line_size);
+ printk(NAME53C8XX ": PCI_CACHE_LINE_SIZE set to %d (fix-up).\n",
+ cache_line_size);
+ }
+
+ if ((pci_fix_up & 2) && cache_line_size &&
+ (chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+ printk(NAME53C8XX": setting PCI_COMMAND_INVALIDATE (fix-up)\n");
+ command |= PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+
+ /*
+ ** Tune PCI LATENCY TIMER according to burst max length transfer.
+ ** (latency timer >= burst length + 6, we add 10 to be quite sure)
+ */
+
+ if (chip->burst_max && (latency_timer == 0 || (pci_fix_up & 4))) {
+ u_char lt = (1 << chip->burst_max) + 6 + 10;
+ if (latency_timer < lt) {
+ printk(NAME53C8XX
+ ": changing PCI_LATENCY_TIMER from %d to %d.\n",
+ (int) latency_timer, (int) lt);
+ latency_timer = lt;
+ pci_write_config_byte(pdev,
+ PCI_LATENCY_TIMER, latency_timer);
+ }
+ }
+
+#endif /* SCSI_NCR_PCI_FIX_UP_SUPPORT */
+
+ /*
+ ** Initialise ncr_device structure with items required by ncr_attach.
+ */
+ device->pdev = pdev;
+ device->slot.bus = PciBusNumber(pdev);
+ device->slot.device_fn = PciDeviceFn(pdev);
+ device->slot.base = base;
+ device->slot.base_2 = base_2;
+ device->slot.io_port = io_port;
+ device->slot.irq = irq;
+ device->attach_done = 0;
+
+ return 0;
+}
+
+/*===================================================================
+**
+** Detect all 53c8xx hosts and then attach them.
+**
+** If we are using NVRAM, once all hosts are detected, we need to
+** check any NVRAM for boot order in case detect and boot order
+** differ and attach them using the order in the NVRAM.
+**
+** If no NVRAM is found or data appears invalid attach boards in
+** the order they are detected.
+**
+**===================================================================
+*/
+static int __init
+sym53c8xx__detect(Scsi_Host_Template *tpnt, u_short ncr_chip_ids[], int chips)
+{
+ pcidev_t pcidev;
+ int i, j, hosts, count;
+ int attach_count = 0;
+ ncr_device *devtbl, *devp;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ ncr_nvram nvram0, nvram, *nvp;
+#endif
+
+ /*
+ ** PCI is required.
+ */
+ if (!pci_present())
+ return 0;
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+ ncr_debug = driver_setup.debug;
+#endif
+ if (initverbose >= 2)
+ ncr_print_driver_setup();
+
+ /*
+	** Allocate the device table since we do not want to
+ ** overflow the kernel stack.
+ ** 1 x 4K PAGE is enough for more than 40 devices for i386.
+ */
+ devtbl = m_calloc(PAGE_SIZE, "devtbl");
+ if (!devtbl)
+ return 0;
+
+ /*
+ ** Detect all NCR PQS/PDS memory controllers.
+ */
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ ncr_detect_pqs_pds();
+#endif
+
+ /*
+ ** Detect all 53c8xx hosts.
+ ** Save the first Symbios NVRAM content if any
+ ** for the boot order.
+ */
+ hosts = PAGE_SIZE / sizeof(*devtbl);
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ nvp = (driver_setup.use_nvram & 0x1) ? &nvram0 : 0;
+#endif
+ j = 0;
+ count = 0;
+ pcidev = PCIDEV_NULL;
+ while (1) {
+ char *msg = "";
+ if (count >= hosts)
+ break;
+ if (j >= chips)
+ break;
+ i = driver_setup.reverse_probe ? chips - 1 - j : j;
+ pcidev = pci_find_device(PCI_VENDOR_ID_NCR, ncr_chip_ids[i],
+ pcidev);
+ if (pcidev == PCIDEV_NULL) {
+ ++j;
+ continue;
+ }
+ /* Some HW as the HP LH4 may report twice PCI devices */
+ for (i = 0; i < count ; i++) {
+ if (devtbl[i].slot.bus == PciBusNumber(pcidev) &&
+ devtbl[i].slot.device_fn == PciDeviceFn(pcidev))
+ break;
+ }
+ if (i != count) /* Ignore this device if we already have it */
+ continue;
+ devp = &devtbl[count];
+ devp->host_id = driver_setup.host_id;
+ devp->attach_done = 0;
+ if (sym53c8xx_pci_init(tpnt, pcidev, devp)) {
+ continue;
+ }
+ ++count;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (nvp) {
+ ncr_get_nvram(devp, nvp);
+ switch(nvp->type) {
+ case SCSI_NCR_SYMBIOS_NVRAM:
+ /*
+ * Switch to the other nvram buffer, so that
+ * nvram0 will contain the first Symbios
+ * format NVRAM content with boot order.
+ */
+ nvp = &nvram;
+ msg = "with Symbios NVRAM";
+ break;
+ case SCSI_NCR_TEKRAM_NVRAM:
+ msg = "with Tekram NVRAM";
+ break;
+ }
+ }
+#endif
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ if (devp->pqs_pds)
+ msg = "(NCR PQS/PDS)";
+#endif
+ printk(KERN_INFO NAME53C8XX ": 53c%s detected %s\n",
+ devp->chip.name, msg);
+ }
+
+ /*
+ ** If we have found a SYMBIOS NVRAM, use first the NVRAM boot
+ ** sequence as device boot order.
+ ** check devices in the boot record against devices detected.
+ ** attach devices if we find a match. boot table records that
+ ** do not match any detected devices will be ignored.
+ ** devices that do not match any boot table will not be attached
+ ** here but will attempt to be attached during the device table
+ ** rescan.
+ */
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (!nvp || nvram0.type != SCSI_NCR_SYMBIOS_NVRAM)
+ goto next;
+ for (i = 0; i < 4; i++) {
+ Symbios_host *h = &nvram0.data.Symbios.host[i];
+ for (j = 0 ; j < count ; j++) {
+ devp = &devtbl[j];
+ if (h->device_fn != devp->slot.device_fn ||
+ h->bus_nr != devp->slot.bus ||
+ h->device_id != devp->chip.device_id)
+ continue;
+ if (devp->attach_done)
+ continue;
+ if (h->flags & SYMBIOS_INIT_SCAN_AT_BOOT) {
+ ncr_get_nvram(devp, nvp);
+ if (!ncr_attach (tpnt, attach_count, devp))
+ attach_count++;
+ }
+#if 0 /* Restore previous behaviour of ncr53c8xx driver */
+ else if (!(driver_setup.use_nvram & 0x80))
+ printk(KERN_INFO NAME53C8XX
+ ": 53c%s state OFF thus not attached\n",
+ devp->chip.name);
+#endif
+ else
+ continue;
+
+ devp->attach_done = 1;
+ break;
+ }
+ }
+next:
+#endif
+
+ /*
+ ** Rescan device list to make sure all boards attached.
+ ** Devices without boot records will not be attached yet
+ ** so try to attach them here.
+ */
+ for (i= 0; i < count; i++) {
+ devp = &devtbl[i];
+ if (!devp->attach_done) {
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ ncr_get_nvram(devp, nvp);
+#endif
+ if (!ncr_attach (tpnt, attach_count, devp))
+ attach_count++;
+ }
+ }
+
+ m_free(devtbl, PAGE_SIZE, "devtbl");
+
+ return attach_count;
+}
diff --git a/linux/src/drivers/scsi/sym53c8xx_defs.h b/linux/src/drivers/scsi/sym53c8xx_defs.h
new file mode 100644
index 00000000..10acf780
--- /dev/null
+++ b/linux/src/drivers/scsi/sym53c8xx_defs.h
@@ -0,0 +1,1767 @@
+/******************************************************************************
+** High Performance device driver for the Symbios 53C896 controller.
+**
+** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+**
+** This driver also supports all the Symbios 53C8XX controller family,
+** except 53C810 revisions < 16, 53C825 revisions < 16 and all
+** revisions of 53C815 controllers.
+**
+** This driver is based on the Linux port of the FreeBSD ncr driver.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** The Linux port of the FreeBSD ncr driver has been achieved in
+** november 1995 by:
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+**-----------------------------------------------------------------------------
+**
+** Major contributions:
+** --------------------
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+*******************************************************************************
+*/
+
+#ifndef SYM53C8XX_DEFS_H
+#define SYM53C8XX_DEFS_H
+
+/*
+** Check supported Linux versions
+*/
+
+#if !defined(LINUX_VERSION_CODE)
+#include <linux/version.h>
+#endif
+#include <linux/config.h>
+
+#ifndef LinuxVersionCode
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+#endif
+
+/*
+ * NCR PQS/PDS special device support.
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_PQS_PDS
+#define SCSI_NCR_PQS_PDS_SUPPORT
+#endif
+
+/*
+ * No more an option, enabled by default.
+ */
+#ifndef CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
+#define CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
+#endif
+
+/*
+** These options are not tunable from 'make config'
+*/
+//#define SCSI_NCR_PROC_INFO_SUPPORT
+
+/*
+** If you want a driver as small as possible, do not define the
+** following options.
+*/
+#define SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+#define SCSI_NCR_DEBUG_INFO_SUPPORT
+#define SCSI_NCR_PCI_FIX_UP_SUPPORT
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+# define SCSI_NCR_USER_COMMAND_SUPPORT
+# define SCSI_NCR_USER_INFO_SUPPORT
+#endif
+
+/*
+** To disable integrity checking, do not define the
+** following option.
+*/
+#ifdef CONFIG_SCSI_NCR53C8XX_INTEGRITY_CHECK
+# define SCSI_NCR_ENABLE_INTEGRITY_CHECK
+#endif
+
+/*==========================================================
+**
+** nvram settings - #define SCSI_NCR_NVRAM_SUPPORT to enable
+**
+**==========================================================
+*/
+
+#ifdef CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
+#define SCSI_NCR_NVRAM_SUPPORT
+/* #define SCSI_NCR_DEBUG_NVRAM */
+#endif
+
+/* ---------------------------------------------------------------------
+** Take into account kernel configured parameters.
+** Most of these options can be overridden at startup by a command line.
+** ---------------------------------------------------------------------
+*/
+
+/*
+ * For Ultra2 and Ultra3 SCSI support option, use special features.
+ *
+ * Value (default) means:
+ * bit 0 : all features enabled, except:
+ * bit 1 : PCI Write And Invalidate.
+ * bit 2 : Data Phase Mismatch handling from SCRIPTS.
+ *
+ * Use boot options ncr53c8xx=specf:1 if you want all chip features to be
+ * enabled by the driver.
+ */
+#define SCSI_NCR_SETUP_SPECIAL_FEATURES (3)
+
+/*
+ * For Ultra2 and Ultra3 SCSI support allow 80Mhz synchronous data transfers.
+ * Value means:
+ * 0 - Ultra speeds disabled
+ * 1 - Ultra enabled (Maximum 20Mtrans/sec)
+ * 2 - Ultra2 enabled (Maximum 40Mtrans/sec)
+ * 3 - Ultra3 enabled (Maximum 80Mtrans/sec)
+ *
+ * Use boot options sym53c8xx=ultra:3 to enable Ultra3 support.
+ */
+
+#define SCSI_NCR_SETUP_ULTRA_SCSI (3)
+#define SCSI_NCR_MAX_SYNC (80)
+
+/*
+ * Allow tags from 2 to 256, default 8
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_MAX_TAGS
+#if CONFIG_SCSI_NCR53C8XX_MAX_TAGS < 2
+#define SCSI_NCR_MAX_TAGS (2)
+#elif CONFIG_SCSI_NCR53C8XX_MAX_TAGS > 256
+#define SCSI_NCR_MAX_TAGS (256)
+#else
+#define SCSI_NCR_MAX_TAGS CONFIG_SCSI_NCR53C8XX_MAX_TAGS
+#endif
+#else
+#define SCSI_NCR_MAX_TAGS (8)
+#endif
+
+/*
+ * Allow tagged command queuing support if configured with default number
+ * of tags set to max (see above).
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS
+#define SCSI_NCR_SETUP_DEFAULT_TAGS CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS
+#elif defined CONFIG_SCSI_NCR53C8XX_TAGGED_QUEUE
+#define SCSI_NCR_SETUP_DEFAULT_TAGS SCSI_NCR_MAX_TAGS
+#else
+#define SCSI_NCR_SETUP_DEFAULT_TAGS (0)
+#endif
+
+/*
+ * Use normal IO if configured. Forced for alpha and powerpc.
+ * Powerpc fails copying to on-chip RAM using memcpy_toio().
+ */
+#if defined(CONFIG_SCSI_NCR53C8XX_IOMAPPED)
+#define SCSI_NCR_IOMAPPED
+#elif defined(__alpha__)
+#define SCSI_NCR_IOMAPPED
+#elif defined(__powerpc__)
+#define SCSI_NCR_IOMAPPED
+#define SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+#elif defined(__sparc__)
+#undef SCSI_NCR_IOMAPPED
+#endif
+
+/*
+ * Should we enable DAC cycles on this platform?
+ * Until further investigation we do not enable it
+ * anywhere at the moment.
+ */
+#undef SCSI_NCR_USE_64BIT_DAC
+
+/*
+ * Immediate arbitration
+ */
+#if defined(CONFIG_SCSI_NCR53C8XX_IARB)
+#define SCSI_NCR_IARB_SUPPORT
+#endif
+
+/*
+ * Should we enable DAC cycles on sparc64 platforms?
+ * Until further investigation we do not enable it
+ * anywhere at the moment.
+ */
+#undef SCSI_NCR_USE_64BIT_DAC
+
+/*
+ * Sync transfer frequency at startup.
+ * Allow from 5Mhz to 80Mhz default 20 Mhz.
+ */
+#ifndef CONFIG_SCSI_NCR53C8XX_SYNC
+#define CONFIG_SCSI_NCR53C8XX_SYNC (20)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC > SCSI_NCR_MAX_SYNC
+#undef CONFIG_SCSI_NCR53C8XX_SYNC
+#define CONFIG_SCSI_NCR53C8XX_SYNC SCSI_NCR_MAX_SYNC
+#endif
+
+#if CONFIG_SCSI_NCR53C8XX_SYNC == 0
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (255)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 5
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (50)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 20
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (250/(CONFIG_SCSI_NCR53C8XX_SYNC))
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 33
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (11)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 40
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (10)
+#else
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (9)
+#endif
+
+/*
+ * Disallow disconnections at boot-up
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_NO_DISCONNECT
+#define SCSI_NCR_SETUP_DISCONNECTION (0)
+#else
+#define SCSI_NCR_SETUP_DISCONNECTION (1)
+#endif
+
+/*
+ * Force synchronous negotiation for all targets
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_FORCE_SYNC_NEGO
+#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (1)
+#else
+#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (0)
+#endif
+
+/*
+ * Disable master parity checking (flawed hardware needs that)
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_MPARITY_CHECK
+#define SCSI_NCR_SETUP_MASTER_PARITY (0)
+#else
+#define SCSI_NCR_SETUP_MASTER_PARITY (1)
+#endif
+
+/*
+ * Disable scsi parity checking (flawed devices may need that)
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_PARITY_CHECK
+#define SCSI_NCR_SETUP_SCSI_PARITY (0)
+#else
+#define SCSI_NCR_SETUP_SCSI_PARITY (1)
+#endif
+
+/*
+ * Vendor specific stuff
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_SYMBIOS_COMPAT
+#define SCSI_NCR_SETUP_LED_PIN (1)
+#define SCSI_NCR_SETUP_DIFF_SUPPORT (4)
+#else
+#define SCSI_NCR_SETUP_LED_PIN (0)
+#define SCSI_NCR_SETUP_DIFF_SUPPORT (0)
+#endif
+
+/*
+ * Settle time after reset at boot-up
+ */
+#define SCSI_NCR_SETUP_SETTLE_TIME (2)
+
+/*
+** Bridge quirks work-around option defaulted to 1.
+*/
+#ifndef SCSI_NCR_PCIQ_WORK_AROUND_OPT
+#define SCSI_NCR_PCIQ_WORK_AROUND_OPT 1
+#endif
+
+/*
+** Work-around common bridge misbehaviour.
+**
+** - Do not flush posted writes in the opposite
+** direction on read.
+** - May reorder DMA writes to memory.
+**
+** This option should not affect performances
+** significantly, so it is the default.
+*/
+#if SCSI_NCR_PCIQ_WORK_AROUND_OPT == 1
+#define SCSI_NCR_PCIQ_MAY_NOT_FLUSH_PW_UPSTREAM
+#define SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+#define SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS
+
+/*
+** Same as option 1, but also deal with
+** misconfigured interrupts.
+**
+** - Edge triggered instead of level sensitive.
+** - No interrupt line connected.
+** - IRQ number misconfigured.
+**
+** If no interrupt is delivered, the driver will
+** catch the interrupt conditions 10 times per
+** second. No need to say that this option is
+** not recommended.
+*/
+#elif SCSI_NCR_PCIQ_WORK_AROUND_OPT == 2
+#define SCSI_NCR_PCIQ_MAY_NOT_FLUSH_PW_UPSTREAM
+#define SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+#define SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS
+#define SCSI_NCR_PCIQ_BROKEN_INTR
+
+/*
+** Some bridge designers decided to flush
+** everything prior to deliver the interrupt.
+** This option tries to deal with such a
+** behaviour.
+*/
+#elif SCSI_NCR_PCIQ_WORK_AROUND_OPT == 3
+#define SCSI_NCR_PCIQ_SYNC_ON_INTR
+#endif
+
+/*
+** Other parameters not configurable with "make config"
+** Avoid to change these constants, unless you know what you are doing.
+*/
+
+#define SCSI_NCR_ALWAYS_SIMPLE_TAG
+#define SCSI_NCR_MAX_SCATTER (127)
+#define SCSI_NCR_MAX_TARGET (16)
+
+/*
+** Compute some desirable value for CAN_QUEUE
+** and CMD_PER_LUN.
+** The driver will use lower values if these
+** ones appear to be too large.
+*/
+#define SCSI_NCR_CAN_QUEUE (8*SCSI_NCR_MAX_TAGS + 2*SCSI_NCR_MAX_TARGET)
+#define SCSI_NCR_CMD_PER_LUN (SCSI_NCR_MAX_TAGS)
+
+#define SCSI_NCR_SG_TABLESIZE (SCSI_NCR_MAX_SCATTER)
+#define SCSI_NCR_TIMER_INTERVAL (HZ)
+
+#if 1 /* defined CONFIG_SCSI_MULTI_LUN */
+#define SCSI_NCR_MAX_LUN (16)
+#else
+#define SCSI_NCR_MAX_LUN (1)
+#endif
+
+#ifndef HOSTS_C
+
+/*
+** These simple macros limit expression involving
+** kernel time values (jiffies) to some that have
+** chance not to be too much incorrect. :-)
+*/
+#define ktime_get(o) (jiffies + (u_long) o)
+#define ktime_exp(b) ((long)(jiffies) - (long)(b) >= 0)
+#define ktime_dif(a, b) ((long)(a) - (long)(b))
+/* These ones are not used in this driver */
+#define ktime_add(a, o) ((a) + (u_long)(o))
+#define ktime_sub(a, o) ((a) - (u_long)(o))
+
+
+/*
+ * IO functions definition for big/little endian CPU support.
+ * For now, the NCR is only supported in little endian addressing mode,
+ */
+
+#ifdef __BIG_ENDIAN
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+#error "BIG ENDIAN byte ordering needs kernel version >= 2.1.0"
+#endif
+
+#define inw_l2b inw
+#define inl_l2b inl
+#define outw_b2l outw
+#define outl_b2l outl
+#define readw_l2b readw
+#define readl_l2b readl
+#define writew_b2l writew
+#define writel_b2l writel
+
+#else /* little endian */
+
+#if defined(__i386__) /* i386 implements full FLAT memory/MMIO model */
+#define inw_raw inw
+#define inl_raw inl
+#define outw_raw outw
+#define outl_raw outl
+#define readb_raw(a) (*(volatile unsigned char *) (a))
+#define readw_raw(a) (*(volatile unsigned short *) (a))
+#define readl_raw(a) (*(volatile unsigned int *) (a))
+#define writeb_raw(b,a) ((*(volatile unsigned char *) (a)) = (b))
+#define writew_raw(b,a) ((*(volatile unsigned short *) (a)) = (b))
+#define writel_raw(b,a) ((*(volatile unsigned int *) (a)) = (b))
+
+#else /* Other little-endian */
+#define inw_raw inw
+#define inl_raw inl
+#define outw_raw outw
+#define outl_raw outl
+#define readw_raw readw
+#define readl_raw readl
+#define writew_raw writew
+#define writel_raw writel
+
+#endif
+#endif
+
+#ifdef SCSI_NCR_BIG_ENDIAN
+#error "The NCR in BIG ENDIAN addressing mode is not (yet) supported"
+#endif
+
+
+/*
+ * IA32 architecture does not reorder STORES and prevents
+ * LOADS from passing STORES. It is called `program order'
+ * by Intel and allows device drivers to deal with memory
+ * ordering by only ensuring that the code is not reordered
+ * by the compiler when ordering is required.
+ * Other architectures implement a weaker ordering that
+ * requires memory barriers (and also IO barriers when they
+ * make sense) to be used.
+ * We want to be paranoid for ppc and ia64. :)
+ */
+
+#if defined __i386__
+#define MEMORY_BARRIER() do { ; } while(0)
+#elif defined __powerpc__
+#define MEMORY_BARRIER() __asm__ volatile("eieio; sync" : : : "memory")
+#elif defined __ia64__
+#define MEMORY_BARRIER() __asm__ volatile("mf.a; mf" : : : "memory")
+#else
+#define MEMORY_BARRIER() mb()
+#endif
+
+
+/*
+ * If the NCR uses big endian addressing mode over the
+ * PCI, actual io register addresses for byte and word
+ * accesses must be changed according to lane routing.
+ * Btw, ncr_offb() and ncr_offw() macros only apply to
+ * constants and so do not generate bloated code.
+ */
+
+#if defined(SCSI_NCR_BIG_ENDIAN)
+
+#define ncr_offb(o) (((o)&~3)+((~((o)&3))&3))
+#define ncr_offw(o) (((o)&~3)+((~((o)&3))&2))
+
+#else
+
+#define ncr_offb(o) (o)
+#define ncr_offw(o) (o)
+
+#endif
+
+/*
+ * If the CPU and the NCR use same endian-ness addressing,
+ * no byte reordering is needed for script patching.
+ * Macro cpu_to_scr() is to be used for script patching.
+ * Macro scr_to_cpu() is to be used for getting a DWORD
+ * from the script.
+ */
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_le32(dw)
+#define scr_to_cpu(dw) le32_to_cpu(dw)
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_be32(dw)
+#define scr_to_cpu(dw) be32_to_cpu(dw)
+
+#else
+
+#define cpu_to_scr(dw) (dw)
+#define scr_to_cpu(dw) (dw)
+
+#endif
+
+/*
+ * Access to the controller chip.
+ *
+ * If SCSI_NCR_IOMAPPED is defined, the driver will use
+ * normal IOs instead of the MEMORY MAPPED IO method
+ * recommended by PCI specifications.
+ * If all PCI bridges, host bridges and architectures
+ * would have been correctly designed for PCI, this
+ * option would be useless.
+ *
+ * If the CPU and the NCR use same endian-ness addressing,
+ * no byte reordering is needed for accessing chip io
+ * registers. Functions suffixed by '_raw' are assumed
+ * to access the chip over the PCI without doing byte
+ * reordering. Functions suffixed by '_l2b' are
+ * assumed to perform little-endian to big-endian byte
+ * reordering, those suffixed by '_b2l' blah, blah,
+ * blah, ...
+ */
+
+#if defined(SCSI_NCR_IOMAPPED)
+
+/*
+ * IO mapped only input / output
+ */
+
+#define INB_OFF(o) inb (np->base_io + ncr_offb(o))
+#define OUTB_OFF(o, val) outb ((val), np->base_io + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) inw_l2b (np->base_io + ncr_offw(o))
+#define INL_OFF(o) inl_l2b (np->base_io + (o))
+
+#define OUTW_OFF(o, val) outw_b2l ((val), np->base_io + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_b2l ((val), np->base_io + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) inw_b2l (np->base_io + ncr_offw(o))
+#define INL_OFF(o) inl_b2l (np->base_io + (o))
+
+#define OUTW_OFF(o, val) outw_l2b ((val), np->base_io + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_l2b ((val), np->base_io + (o))
+
+#else
+
+#define INW_OFF(o) inw_raw (np->base_io + ncr_offw(o))
+#define INL_OFF(o) inl_raw (np->base_io + (o))
+
+#define OUTW_OFF(o, val) outw_raw ((val), np->base_io + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_raw ((val), np->base_io + (o))
+
+#endif /* ENDIANs */
+
+#else /* defined SCSI_NCR_IOMAPPED */
+
+/*
+ * MEMORY mapped IO input / output
+ */
+
+#define INB_OFF(o) readb((char *)np->reg + ncr_offb(o))
+#define OUTB_OFF(o, val) writeb((val), (char *)np->reg + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_l2b((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_l2b((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_b2l((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_b2l((val), (char *)np->reg + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_b2l((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_b2l((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_l2b((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_l2b((val), (char *)np->reg + (o))
+
+#else
+
+#define INW_OFF(o) readw_raw((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_raw((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_raw((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_raw((val), (char *)np->reg + (o))
+
+#endif
+
+#endif /* defined SCSI_NCR_IOMAPPED */
+
+#define INB(r) INB_OFF (offsetof(struct ncr_reg,r))
+#define INW(r) INW_OFF (offsetof(struct ncr_reg,r))
+#define INL(r) INL_OFF (offsetof(struct ncr_reg,r))
+
+#define OUTB(r, val) OUTB_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTW(r, val) OUTW_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTL(r, val) OUTL_OFF (offsetof(struct ncr_reg,r), (val))
+
+/*
+ * Set bit field ON, OFF
+ */
+
+#define OUTONB(r, m) OUTB(r, INB(r) | (m))
+#define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m))
+#define OUTONW(r, m) OUTW(r, INW(r) | (m))
+#define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m))
+#define OUTONL(r, m) OUTL(r, INL(r) | (m))
+#define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m))
+
+/*
+ * We normally want the chip to have a consistent view
+ * of driver internal data structures when we restart it.
+ * Thus these macros.
+ */
+#define OUTL_DSP(v) \
+ do { \
+ MEMORY_BARRIER(); \
+ OUTL (nc_dsp, (v)); \
+ } while (0)
+
+#define OUTONB_STD() \
+ do { \
+ MEMORY_BARRIER(); \
+ OUTONB (nc_dcntl, (STD|NOCOM)); \
+ } while (0)
+
+
+/*
+** NCR53C8XX Device Ids
+*/
+
+#ifndef PCI_DEVICE_ID_NCR_53C810
+#define PCI_DEVICE_ID_NCR_53C810 1
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C810AP
+#define PCI_DEVICE_ID_NCR_53C810AP 5
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C815
+#define PCI_DEVICE_ID_NCR_53C815 4
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C820
+#define PCI_DEVICE_ID_NCR_53C820 2
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C825
+#define PCI_DEVICE_ID_NCR_53C825 3
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C860
+#define PCI_DEVICE_ID_NCR_53C860 6
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C875
+#define PCI_DEVICE_ID_NCR_53C875 0xf
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C875J
+#define PCI_DEVICE_ID_NCR_53C875J 0x8f
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C885
+#define PCI_DEVICE_ID_NCR_53C885 0xd
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C895
+#define PCI_DEVICE_ID_NCR_53C895 0xc
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C896
+#define PCI_DEVICE_ID_NCR_53C896 0xb
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C895A
+#define PCI_DEVICE_ID_NCR_53C895A 0x12
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C1510D
+#define PCI_DEVICE_ID_NCR_53C1510D 0xa
+#endif
+
+#ifndef PCI_DEVICE_ID_LSI_53C1010
+#define PCI_DEVICE_ID_LSI_53C1010 0x20
+#endif
+
+#ifndef PCI_DEVICE_ID_LSI_53C1010_66
+#define PCI_DEVICE_ID_LSI_53C1010_66 0x21
+#endif
+
+
+/*
+** NCR53C8XX devices features table.
+*/
+typedef struct {
+ unsigned short device_id;
+ unsigned short revision_id;
+ char *name;
+ unsigned char burst_max; /* log-base-2 of max burst */
+ unsigned char offset_max;
+ unsigned char nr_divisor;
+ unsigned int features;
+#define FE_LED0 (1<<0)
+#define FE_WIDE (1<<1) /* Wide data transfers */
+#define FE_ULTRA (1<<2) /* Ultra speed 20Mtrans/sec */
+#define FE_ULTRA2 (1<<3) /* Ultra 2 - 40 Mtrans/sec */
+#define FE_DBLR (1<<4) /* Clock doubler present */
+#define FE_QUAD (1<<5) /* Clock quadrupler present */
+#define FE_ERL (1<<6) /* Enable read line */
+#define FE_CLSE (1<<7) /* Cache line size enable */
+#define FE_WRIE (1<<8) /* Write & Invalidate enable */
+#define FE_ERMP (1<<9) /* Enable read multiple */
+#define FE_BOF (1<<10) /* Burst opcode fetch */
+#define FE_DFS (1<<11) /* DMA fifo size */
+#define FE_PFEN (1<<12) /* Prefetch enable */
+#define FE_LDSTR (1<<13) /* Load/Store supported */
+#define FE_RAM (1<<14) /* On chip RAM present */
+#define FE_CLK80 (1<<15) /* Board clock is 80 MHz */
+#define FE_RAM8K (1<<16) /* On chip RAM sized 8Kb */
+#define FE_64BIT (1<<17) /* Supports 64-bit addressing */
+#define FE_IO256 (1<<18) /* Requires full 256 bytes in PCI space */
+#define FE_NOPM (1<<19) /* Scripts handles phase mismatch */
+#define FE_LEDC (1<<20) /* Hardware control of LED */
+#define FE_DIFF (1<<21) /* Support Differential SCSI */
+#define FE_ULTRA3 (1<<22) /* Ultra-3 80Mtrans/sec */
+#define FE_66MHZ (1<<23) /* 66MHz PCI Support */
+
+#define FE_CACHE_SET (FE_ERL|FE_CLSE|FE_WRIE|FE_ERMP)
+#define FE_SCSI_SET (FE_WIDE|FE_ULTRA|FE_ULTRA2|FE_DBLR|FE_QUAD|F_CLK80)
+#define FE_SPECIAL_SET (FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM)
+} ncr_chip;
+
+/*
+** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 3.
+** Memory Read transaction terminated by a retry followed by
+** Memory Read Line command.
+*/
+#define FE_CACHE0_SET (FE_CACHE_SET & ~FE_ERL)
+
+/*
+** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 5.
+** On paper, this errata is harmless. But it is a good reason for
+** using a shorter programmed burst length (64 DWORDS instead of 128).
+*/
+
+#define SCSI_NCR_CHIP_TABLE \
+{ \
+ {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, \
+ FE_ERL} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, \
+ FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4, 8, 4, \
+ FE_ERL|FE_BOF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C820, 0xff, "820", 4, 8, 4, \
+ FE_WIDE|FE_ERL} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 4, 8, 4, \
+ FE_WIDE|FE_ERL|FE_BOF|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6, 8, 4, \
+ FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, \
+ FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|\
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x0f, "875", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x1f, "876", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x2f, "875E", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0xff, "876", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875J,0xff, "875J", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C895A, 0xff, "895a", 6, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C1510D, 0xff, "1510D", 7, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_IO256} \
+ , \
+ {PCI_DEVICE_ID_LSI_53C1010, 0xff, "1010", 6, 31, 7, \
+ FE_WIDE|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_ULTRA3} \
+ , \
+ {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010_66", 6, 31, 7, \
+ FE_WIDE|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_ULTRA3|FE_66MHZ} \
+}
+
+/*
+ * List of supported NCR chip ids
+ */
+#define SCSI_NCR_CHIP_IDS \
+{ \
+ PCI_DEVICE_ID_NCR_53C810, \
+ PCI_DEVICE_ID_NCR_53C815, \
+ PCI_DEVICE_ID_NCR_53C820, \
+ PCI_DEVICE_ID_NCR_53C825, \
+ PCI_DEVICE_ID_NCR_53C860, \
+ PCI_DEVICE_ID_NCR_53C875, \
+ PCI_DEVICE_ID_NCR_53C875J, \
+ PCI_DEVICE_ID_NCR_53C885, \
+ PCI_DEVICE_ID_NCR_53C895, \
+ PCI_DEVICE_ID_NCR_53C896, \
+ PCI_DEVICE_ID_NCR_53C895A, \
+ PCI_DEVICE_ID_NCR_53C1510D, \
+ PCI_DEVICE_ID_LSI_53C1010, \
+ PCI_DEVICE_ID_LSI_53C1010_66 \
+}
+
+/*
+** Driver setup structure.
+**
+** This structure is initialized from linux config options.
+** It can be overridden at boot-up by the boot command line.
+*/
+#define SCSI_NCR_MAX_EXCLUDES 8
+struct ncr_driver_setup {
+ u_char master_parity;
+ u_char scsi_parity;
+ u_char disconnection;
+ u_char special_features;
+ u_char ultra_scsi;
+ u_char force_sync_nego;
+ u_char reverse_probe;
+ u_char pci_fix_up;
+ u_char use_nvram;
+ u_char verbose;
+ u_char default_tags;
+ u_short default_sync;
+ u_short debug;
+ u_char burst_max;
+ u_char led_pin;
+ u_char max_wide;
+ u_char settle_delay;
+ u_char diff_support;
+ u_char irqm;
+ u_char bus_check;
+ u_char optimize;
+ u_char recovery;
+ u_char host_id;
+ u_short iarb;
+ u_long excludes[SCSI_NCR_MAX_EXCLUDES];
+ char tag_ctrl[100];
+};
+
+/*
+** Initial setup.
+** Can be overridden at startup by a command line.
+*/
+#define SCSI_NCR_DRIVER_SETUP \
+{ \
+ SCSI_NCR_SETUP_MASTER_PARITY, \
+ SCSI_NCR_SETUP_SCSI_PARITY, \
+ SCSI_NCR_SETUP_DISCONNECTION, \
+ SCSI_NCR_SETUP_SPECIAL_FEATURES, \
+ SCSI_NCR_SETUP_ULTRA_SCSI, \
+ SCSI_NCR_SETUP_FORCE_SYNC_NEGO, \
+ 0, \
+ 0, \
+ 1, \
+ 0, \
+ SCSI_NCR_SETUP_DEFAULT_TAGS, \
+ SCSI_NCR_SETUP_DEFAULT_SYNC, \
+ 0x00, \
+ 7, \
+ SCSI_NCR_SETUP_LED_PIN, \
+ 1, \
+ SCSI_NCR_SETUP_SETTLE_TIME, \
+ SCSI_NCR_SETUP_DIFF_SUPPORT, \
+ 0, \
+ 1, \
+ 0, \
+ 0, \
+ 255, \
+ 0x00 \
+}
+
+/*
+** Boot fail safe setup.
+** Override initial setup from boot command line:
+** ncr53c8xx=safe:y
+*/
+#define SCSI_NCR_DRIVER_SAFE_SETUP \
+{ \
+ 0, \
+ 1, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 1, \
+ 2, \
+ 0, \
+ 255, \
+ 0x00, \
+ 255, \
+ 0, \
+ 0, \
+ 10, \
+ 1, \
+ 1, \
+ 1, \
+ 0, \
+ 0, \
+ 255 \
+}
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+/*
+** Symbios NvRAM data format
+*/
+#define SYMBIOS_NVRAM_SIZE 368
+#define SYMBIOS_NVRAM_ADDRESS 0x100
+
+struct Symbios_nvram {
+/* Header 6 bytes */
+ u_short type; /* 0x0000 */
+ u_short byte_count; /* excluding header/trailer */
+ u_short checksum;
+
+/* Controller set up 20 bytes */
+ u_char v_major; /* 0x00 */
+ u_char v_minor; /* 0x30 */
+ u_int32 boot_crc;
+ u_short flags;
+#define SYMBIOS_SCAM_ENABLE (1)
+#define SYMBIOS_PARITY_ENABLE (1<<1)
+#define SYMBIOS_VERBOSE_MSGS (1<<2)
+#define SYMBIOS_CHS_MAPPING (1<<3)
+#define SYMBIOS_NO_NVRAM (1<<3) /* ??? */
+ u_short flags1;
+#define SYMBIOS_SCAN_HI_LO (1)
+ u_short term_state;
+#define SYMBIOS_TERM_CANT_PROGRAM (0)
+#define SYMBIOS_TERM_ENABLED (1)
+#define SYMBIOS_TERM_DISABLED (2)
+ u_short rmvbl_flags;
+#define SYMBIOS_RMVBL_NO_SUPPORT (0)
+#define SYMBIOS_RMVBL_BOOT_DEVICE (1)
+#define SYMBIOS_RMVBL_MEDIA_INSTALLED (2)
+ u_char host_id;
+ u_char num_hba; /* 0x04 */
+ u_char num_devices; /* 0x10 */
+ u_char max_scam_devices; /* 0x04 */
+ u_char num_valid_scam_devives; /* 0x00 */
+ u_char rsvd;
+
+/* Boot order 14 bytes * 4 */
+ struct Symbios_host{
+ u_short type; /* 4:8xx / 0:nok */
+ u_short device_id; /* PCI device id */
+ u_short vendor_id; /* PCI vendor id */
+ u_char bus_nr; /* PCI bus number */
+ u_char device_fn; /* PCI device/function number << 3*/
+ u_short word8;
+ u_short flags;
+#define SYMBIOS_INIT_SCAN_AT_BOOT (1)
+ u_short io_port; /* PCI io_port address */
+ } host[4];
+
+/* Targets 8 bytes * 16 */
+ struct Symbios_target {
+ u_char flags;
+#define SYMBIOS_DISCONNECT_ENABLE (1)
+#define SYMBIOS_SCAN_AT_BOOT_TIME (1<<1)
+#define SYMBIOS_SCAN_LUNS (1<<2)
+#define SYMBIOS_QUEUE_TAGS_ENABLED (1<<3)
+ u_char rsvd;
+ u_char bus_width; /* 0x08/0x10 */
+ u_char sync_offset;
+ u_short sync_period; /* 4*period factor */
+ u_short timeout;
+ } target[16];
+/* Scam table 8 bytes * 4 */
+ struct Symbios_scam {
+ u_short id;
+ u_short method;
+#define SYMBIOS_SCAM_DEFAULT_METHOD (0)
+#define SYMBIOS_SCAM_DONT_ASSIGN (1)
+#define SYMBIOS_SCAM_SET_SPECIFIC_ID (2)
+#define SYMBIOS_SCAM_USE_ORDER_GIVEN (3)
+ u_short status;
+#define SYMBIOS_SCAM_UNKNOWN (0)
+#define SYMBIOS_SCAM_DEVICE_NOT_FOUND (1)
+#define SYMBIOS_SCAM_ID_NOT_SET (2)
+#define SYMBIOS_SCAM_ID_VALID (3)
+ u_char target_id;
+ u_char rsvd;
+ } scam[4];
+
+ u_char spare_devices[15*8];
+ u_char trailer[6]; /* 0xfe 0xfe 0x00 0x00 0x00 0x00 */
+};
+typedef struct Symbios_nvram Symbios_nvram;
+typedef struct Symbios_host Symbios_host;
+typedef struct Symbios_target Symbios_target;
+typedef struct Symbios_scam Symbios_scam;
+
+/*
+** Tekram NvRAM data format.
+*/
+#define TEKRAM_NVRAM_SIZE 64
+#define TEKRAM_93C46_NVRAM_ADDRESS 0
+#define TEKRAM_24C16_NVRAM_ADDRESS 0x40
+
+struct Tekram_nvram {
+ struct Tekram_target {
+ u_char flags;
+#define TEKRAM_PARITY_CHECK (1)
+#define TEKRAM_SYNC_NEGO (1<<1)
+#define TEKRAM_DISCONNECT_ENABLE (1<<2)
+#define TEKRAM_START_CMD (1<<3)
+#define TEKRAM_TAGGED_COMMANDS (1<<4)
+#define TEKRAM_WIDE_NEGO (1<<5)
+ u_char sync_index;
+ u_short word2;
+ } target[16];
+ u_char host_id;
+ u_char flags;
+#define TEKRAM_MORE_THAN_2_DRIVES (1)
+#define TEKRAM_DRIVES_SUP_1GB (1<<1)
+#define TEKRAM_RESET_ON_POWER_ON (1<<2)
+#define TEKRAM_ACTIVE_NEGATION (1<<3)
+#define TEKRAM_IMMEDIATE_SEEK (1<<4)
+#define TEKRAM_SCAN_LUNS (1<<5)
+#define TEKRAM_REMOVABLE_FLAGS (3<<6) /* 0: disable; 1: boot device; 2:all */
+ u_char boot_delay_index;
+ u_char max_tags_index;
+ u_short flags1;
+#define TEKRAM_F2_F6_ENABLED (1)
+ u_short spare[29];
+};
+typedef struct Tekram_nvram Tekram_nvram;
+typedef struct Tekram_target Tekram_target;
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/**************** ORIGINAL CONTENT of ncrreg.h from FreeBSD ******************/
+
+/*-----------------------------------------------------------------
+**
+** The ncr 53c810 register structure.
+**
+**-----------------------------------------------------------------
+*/
+
+struct ncr_reg {
+/*00*/ u_char nc_scntl0; /* full arb., ena parity, par->ATN */
+
+/*01*/ u_char nc_scntl1; /* no reset */
+ #define ISCON 0x10 /* connected to scsi */
+ #define CRST 0x08 /* force reset */
+ #define IARB 0x02 /* immediate arbitration */
+
+/*02*/ u_char nc_scntl2; /* no disconnect expected */
+ #define SDU 0x80 /* cmd: disconnect will raise error */
+ #define CHM 0x40 /* sta: chained mode */
+ #define WSS 0x08 /* sta: wide scsi send [W]*/
+ #define WSR 0x01 /* sta: wide scsi received [W]*/
+
+/*03*/ u_char nc_scntl3; /* cnf system clock dependent */
+ #define EWS 0x08 /* cmd: enable wide scsi [W]*/
+ #define ULTRA 0x80 /* cmd: ULTRA enable */
+ /* bits 0-2, 7 rsvd for C1010 */
+
+/*04*/ u_char nc_scid; /* cnf host adapter scsi address */
+ #define RRE 0x40 /* r/w:e enable response to resel. */
+ #define SRE 0x20 /* r/w:e enable response to select */
+
+/*05*/ u_char nc_sxfer; /* ### Sync speed and count */
+ /* bits 6-7 rsvd for C1010 */
+
+/*06*/ u_char nc_sdid; /* ### Destination-ID */
+
+/*07*/ u_char nc_gpreg; /* ??? IO-Pins */
+
+/*08*/ u_char nc_sfbr; /* ### First byte in phase */
+
+/*09*/ u_char nc_socl;
+ #define CREQ 0x80 /* r/w: SCSI-REQ */
+ #define CACK 0x40 /* r/w: SCSI-ACK */
+ #define CBSY 0x20 /* r/w: SCSI-BSY */
+ #define CSEL 0x10 /* r/w: SCSI-SEL */
+ #define CATN 0x08 /* r/w: SCSI-ATN */
+ #define CMSG 0x04 /* r/w: SCSI-MSG */
+ #define CC_D 0x02 /* r/w: SCSI-C_D */
+ #define CI_O 0x01 /* r/w: SCSI-I_O */
+
+/*0a*/ u_char nc_ssid;
+
+/*0b*/ u_char nc_sbcl;
+
+/*0c*/ u_char nc_dstat;
+ #define DFE 0x80 /* sta: dma fifo empty */
+ #define MDPE 0x40 /* int: master data parity error */
+ #define BF 0x20 /* int: script: bus fault */
+ #define ABRT 0x10 /* int: script: command aborted */
+ #define SSI 0x08 /* int: script: single step */
+ #define SIR 0x04 /* int: script: interrupt instruct. */
+ #define IID 0x01 /* int: script: illegal instruct. */
+
+/*0d*/ u_char nc_sstat0;
+ #define ILF 0x80 /* sta: data in SIDL register lsb */
+ #define ORF 0x40 /* sta: data in SODR register lsb */
+ #define OLF 0x20 /* sta: data in SODL register lsb */
+ #define AIP 0x10 /* sta: arbitration in progress */
+ #define LOA 0x08 /* sta: arbitration lost */
+ #define WOA 0x04 /* sta: arbitration won */
+ #define IRST 0x02 /* sta: scsi reset signal */
+ #define SDP 0x01 /* sta: scsi parity signal */
+
+/*0e*/ u_char nc_sstat1;
+ #define FF3210 0xf0 /* sta: bytes in the scsi fifo */
+
+/*0f*/ u_char nc_sstat2;
+ #define ILF1 0x80 /* sta: data in SIDL register msb[W]*/
+ #define ORF1 0x40 /* sta: data in SODR register msb[W]*/
+ #define OLF1 0x20 /* sta: data in SODL register msb[W]*/
+ #define DM 0x04 /* sta: DIFFSENS mismatch (895/6 only) */
+ #define LDSC 0x02 /* sta: disconnect & reconnect */
+
+/*10*/ u_char nc_dsa; /* --> Base page */
+/*11*/ u_char nc_dsa1;
+/*12*/ u_char nc_dsa2;
+/*13*/ u_char nc_dsa3;
+
+/*14*/ u_char nc_istat; /* --> Main Command and status */
+ #define CABRT 0x80 /* cmd: abort current operation */
+ #define SRST 0x40 /* mod: reset chip */
+ #define SIGP 0x20 /* r/w: message from host to ncr */
+ #define SEM 0x10 /* r/w: message between host + ncr */
+ #define CON 0x08 /* sta: connected to scsi */
+ #define INTF 0x04 /* sta: int on the fly (reset by wr)*/
+ #define SIP 0x02 /* sta: scsi-interrupt */
+ #define DIP 0x01 /* sta: host/script interrupt */
+
+/*15*/ u_char nc_istat1; /* 896 only */
+/*16*/ u_char nc_mbox0; /* 896 only */
+/*17*/ u_char nc_mbox1; /* 896 only */
+
+/*18*/ u_char nc_ctest0;
+/*19*/ u_char nc_ctest1;
+
+/*1a*/ u_char nc_ctest2;
+ #define CSIGP 0x40
+ /* bits 0-2,7 rsvd for C1010 */
+
+/*1b*/ u_char nc_ctest3;
+ #define FLF 0x08 /* cmd: flush dma fifo */
+ #define CLF 0x04 /* cmd: clear dma fifo */
+ #define FM 0x02 /* mod: fetch pin mode */
+ #define WRIE 0x01 /* mod: write and invalidate enable */
+ /* bits 4-7 rsvd for C1010 */
+
+/*1c*/ u_int32 nc_temp; /* ### Temporary stack */
+
+/*20*/ u_char nc_dfifo;
+/*21*/ u_char nc_ctest4;
+ #define BDIS 0x80 /* mod: burst disable */
+ #define MPEE 0x08 /* mod: master parity error enable */
+
+/*22*/ u_char nc_ctest5;
+ #define DFS 0x20 /* mod: dma fifo size */
+ /* bits 0-1, 3-7 rsvd for C1010 */
+/*23*/ u_char nc_ctest6;
+
+/*24*/ u_int32 nc_dbc; /* ### Byte count and command */
+/*28*/ u_int32 nc_dnad; /* ### Next command register */
+/*2c*/ u_int32 nc_dsp; /* --> Script Pointer */
+/*30*/ u_int32 nc_dsps; /* --> Script pointer save/opcode#2 */
+
+/*34*/ u_char nc_scratcha; /* Temporary register a */
+/*35*/ u_char nc_scratcha1;
+/*36*/ u_char nc_scratcha2;
+/*37*/ u_char nc_scratcha3;
+
+/*38*/ u_char nc_dmode;
+ #define BL_2 0x80 /* mod: burst length shift value +2 */
+ #define BL_1 0x40 /* mod: burst length shift value +1 */
+ #define ERL 0x08 /* mod: enable read line */
+ #define ERMP 0x04 /* mod: enable read multiple */
+ #define BOF 0x02 /* mod: burst op code fetch */
+
+/*39*/ u_char nc_dien;
+/*3a*/ u_char nc_sbr;
+
+/*3b*/ u_char nc_dcntl; /* --> Script execution control */
+ #define CLSE 0x80 /* mod: cache line size enable */
+ #define PFF 0x40 /* cmd: pre-fetch flush */
+ #define PFEN 0x20 /* mod: pre-fetch enable */
+ #define SSM 0x10 /* mod: single step mode */
+ #define IRQM 0x08 /* mod: irq mode (1 = totem pole !) */
+ #define STD 0x04 /* cmd: start dma mode */
+ #define IRQD 0x02 /* mod: irq disable */
+ #define NOCOM 0x01 /* cmd: protect sfbr while reselect */
+ /* bits 0-1 rsvd for C1010 */
+
+/*3c*/ u_int32 nc_adder;
+
+/*40*/ u_short nc_sien; /* -->: interrupt enable */
+/*42*/ u_short nc_sist; /* <--: interrupt status */
+ #define SBMC 0x1000/* sta: SCSI Bus Mode Change (895/6 only) */
+ #define STO 0x0400/* sta: timeout (select) */
+ #define GEN 0x0200/* sta: timeout (general) */
+ #define HTH 0x0100/* sta: timeout (handshake) */
+ #define MA 0x80 /* sta: phase mismatch */
+ #define CMP 0x40 /* sta: arbitration complete */
+ #define SEL 0x20 /* sta: selected by another device */
+ #define RSL 0x10 /* sta: reselected by another device*/
+ #define SGE 0x08 /* sta: gross error (over/underflow)*/
+ #define UDC 0x04 /* sta: unexpected disconnect */
+ #define RST 0x02 /* sta: scsi bus reset detected */
+ #define PAR 0x01 /* sta: scsi parity error */
+
+/*44*/ u_char nc_slpar;
+/*45*/ u_char nc_swide;
+/*46*/ u_char nc_macntl;
+/*47*/ u_char nc_gpcntl;
+/*48*/ u_char nc_stime0; /* cmd: timeout for select&handshake*/
+/*49*/ u_char nc_stime1; /* cmd: timeout user defined */
+/*4a*/ u_short nc_respid; /* sta: Reselect-IDs */
+
+/*4c*/ u_char nc_stest0;
+
+/*4d*/ u_char nc_stest1;
+ #define SCLK 0x80 /* Use the PCI clock as SCSI clock */
+ #define DBLEN 0x08 /* clock doubler running */
+ #define DBLSEL 0x04 /* clock doubler selected */
+
+
+/*4e*/ u_char nc_stest2;
+ #define ROF 0x40 /* reset scsi offset (after gross error!) */
+ #define EXT 0x02 /* extended filtering */
+
+/*4f*/ u_char nc_stest3;
+ #define TE 0x80 /* c: tolerAnt enable */
+ #define HSC 0x20 /* c: Halt SCSI Clock */
+ #define CSF 0x02 /* c: clear scsi fifo */
+
+/*50*/ u_short nc_sidl; /* Lowlevel: latched from scsi data */
+/*52*/ u_char nc_stest4;
+ #define SMODE 0xc0 /* SCSI bus mode (895/6 only) */
+ #define SMODE_HVD 0x40 /* High Voltage Differential */
+ #define SMODE_SE 0x80 /* Single Ended */
+ #define SMODE_LVD 0xc0 /* Low Voltage Differential */
+ #define LCKFRQ 0x20 /* Frequency Lock (895/6 only) */
+ /* bits 0-5 rsvd for C1010 */
+
+/*53*/ u_char nc_53_;
+/*54*/ u_short nc_sodl; /* Lowlevel: data out to scsi data */
+/*56*/ u_char nc_ccntl0; /* Chip Control 0 (896) */
+ #define ENPMJ 0x80 /* Enable Phase Mismatch Jump */
+ #define PMJCTL 0x40 /* Phase Mismatch Jump Control */
+ #define ENNDJ 0x20 /* Enable Non Data PM Jump */
+ #define DISFC 0x10 /* Disable Auto FIFO Clear */
+ #define DILS 0x02 /* Disable Internal Load/Store */
+ #define DPR 0x01 /* Disable Pipe Req */
+
+/*57*/ u_char nc_ccntl1; /* Chip Control 1 (896) */
+ #define ZMOD 0x80 /* High Impedance Mode */
+ #define DIC 0x10 /* Disable Internal Cycles */
+ #define DDAC 0x08 /* Disable Dual Address Cycle */
+ #define XTIMOD 0x04 /* 64-bit Table Ind. Indexing Mode */
+ #define EXTIBMV 0x02 /* Enable 64-bit Table Ind. BMOV */
+ #define EXDBMV 0x01 /* Enable 64-bit Direct BMOV */
+
+/*58*/ u_short nc_sbdl; /* Lowlevel: data from scsi data */
+/*5a*/ u_short nc_5a_;
+
+/*5c*/ u_char nc_scr0; /* Working register B */
+/*5d*/ u_char nc_scr1; /* */
+/*5e*/ u_char nc_scr2; /* */
+/*5f*/ u_char nc_scr3; /* */
+
+/*60*/ u_char nc_scrx[64]; /* Working register C-R */
+/*a0*/ u_int32 nc_mmrs; /* Memory Move Read Selector */
+/*a4*/ u_int32 nc_mmws; /* Memory Move Write Selector */
+/*a8*/ u_int32 nc_sfs; /* Script Fetch Selector */
+/*ac*/ u_int32 nc_drs; /* DSA Relative Selector */
+/*b0*/ u_int32 nc_sbms; /* Static Block Move Selector */
+/*b4*/ u_int32 nc_dbms; /* Dynamic Block Move Selector */
+/*b8*/ u_int32 nc_dnad64; /* DMA Next Address 64 */
+/*bc*/ u_short nc_scntl4; /* C1010 only */
+ #define U3EN 0x80 /* Enable Ultra 3 */
+ #define AIPEN 0x40 /* Allow check upper byte lanes */
+ #define XCLKH_DT 0x08 /* Extra clock of data hold on DT
+ transfer edge */
+ #define XCLKH_ST 0x04 /* Extra clock of data hold on ST
+ transfer edge */
+
+/*be*/ u_char nc_aipcntl0; /* Epat Control 1 C1010 only */
+/*bf*/ u_char nc_aipcntl1; /* AIP Control C1010_66 Only */
+
+/*c0*/ u_int32 nc_pmjad1; /* Phase Mismatch Jump Address 1 */
+/*c4*/ u_int32 nc_pmjad2; /* Phase Mismatch Jump Address 2 */
+/*c8*/ u_char nc_rbc; /* Remaining Byte Count */
+/*c9*/ u_char nc_rbc1; /* */
+/*ca*/ u_char nc_rbc2; /* */
+/*cb*/ u_char nc_rbc3; /* */
+
+/*cc*/ u_char nc_ua; /* Updated Address */
+/*cd*/ u_char nc_ua1; /* */
+/*ce*/ u_char nc_ua2; /* */
+/*cf*/ u_char nc_ua3; /* */
+/*d0*/ u_int32 nc_esa; /* Entry Storage Address */
+/*d4*/ u_char nc_ia; /* Instruction Address */
+/*d5*/ u_char nc_ia1;
+/*d6*/ u_char nc_ia2;
+/*d7*/ u_char nc_ia3;
+/*d8*/ u_int32 nc_sbc; /* SCSI Byte Count (3 bytes only) */
+/*dc*/ u_int32 nc_csbc; /* Cumulative SCSI Byte Count */
+
+ /* Following for C1010 only */
+/*e0*/ u_short nc_crcpad; /* CRC Value */
+/*e2*/ u_char nc_crccntl0; /* CRC control register */
+ #define SNDCRC 0x10 /* Send CRC Request */
+/*e3*/ u_char nc_crccntl1; /* CRC control register */
+/*e4*/ u_int32 nc_crcdata; /* CRC data register */
+/*e8*/ u_int32 nc_e8_; /* rsvd */
+/*ec*/ u_int32 nc_ec_; /* rsvd */
+/*f0*/ u_short nc_dfbc; /* DMA FIFO byte count */
+
+};
+
+/*-----------------------------------------------------------
+**
+** Utility macros for the script.
+**
+**-----------------------------------------------------------
+*/
+
+#define REGJ(p,r) (offsetof(struct ncr_reg, p ## r))
+#define REG(r) REGJ (nc_, r)
+
+typedef u_int32 ncrcmd;
+
+/*-----------------------------------------------------------
+**
+** SCSI phases
+**
+** DT phases illegal for ncr driver.
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_DATA_OUT 0x00000000
+#define SCR_DATA_IN 0x01000000
+#define SCR_COMMAND 0x02000000
+#define SCR_STATUS 0x03000000
+#define SCR_DT_DATA_OUT 0x04000000
+#define SCR_DT_DATA_IN 0x05000000
+#define SCR_MSG_OUT 0x06000000
+#define SCR_MSG_IN 0x07000000
+
+#define SCR_ILG_OUT 0x04000000
+#define SCR_ILG_IN 0x05000000
+
+/*-----------------------------------------------------------
+**
+** Data transfer via SCSI.
+**
+**-----------------------------------------------------------
+**
+** MOVE_ABS (LEN)
+** <<start address>>
+**
+** MOVE_IND (LEN)
+** <<dnad_offset>>
+**
+** MOVE_TBL
+** <<dnad_offset>>
+**
+**-----------------------------------------------------------
+*/
+
+#define OPC_MOVE 0x08000000
+
+#define SCR_MOVE_ABS(l) ((0x00000000 | OPC_MOVE) | (l))
+#define SCR_MOVE_IND(l) ((0x20000000 | OPC_MOVE) | (l))
+#define SCR_MOVE_TBL (0x10000000 | OPC_MOVE)
+
+#define SCR_CHMOV_ABS(l) ((0x00000000) | (l))
+#define SCR_CHMOV_IND(l) ((0x20000000) | (l))
+#define SCR_CHMOV_TBL (0x10000000)
+
+struct scr_tblmove {
+ u_int32 size;
+ u_int32 addr;
+};
+
+/*-----------------------------------------------------------
+**
+** Selection
+**
+**-----------------------------------------------------------
+**
+** SEL_ABS | SCR_ID (0..15) [ | REL_JMP]
+** <<alternate_address>>
+**
+** SEL_TBL | << dnad_offset>> [ | REL_JMP]
+** <<alternate_address>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_SEL_ABS 0x40000000
+#define SCR_SEL_ABS_ATN 0x41000000
+#define SCR_SEL_TBL 0x42000000
+#define SCR_SEL_TBL_ATN 0x43000000
+
+struct scr_tblsel {
+ u_char sel_scntl4;
+ u_char sel_sxfer;
+ u_char sel_id;
+ u_char sel_scntl3;
+};
+
+#define SCR_JMP_REL 0x04000000
+#define SCR_ID(id) (((u_int32)(id)) << 16)
+
+/*-----------------------------------------------------------
+**
+** Waiting for Disconnect or Reselect
+**
+**-----------------------------------------------------------
+**
+** WAIT_DISC
+** dummy: <<alternate_address>>
+**
+** WAIT_RESEL
+** <<alternate_address>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_WAIT_DISC 0x48000000
+#define SCR_WAIT_RESEL 0x50000000
+
+/*-----------------------------------------------------------
+**
+** Bit Set / Reset
+**
+**-----------------------------------------------------------
+**
+** SET (flags {|.. })
+**
+** CLR (flags {|.. })
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_SET(f) (0x58000000 | (f))
+#define SCR_CLR(f) (0x60000000 | (f))
+
+#define SCR_CARRY 0x00000400
+#define SCR_TRG 0x00000200
+#define SCR_ACK 0x00000040
+#define SCR_ATN 0x00000008
+
+
+
+
+/*-----------------------------------------------------------
+**
+** Memory to memory move
+**
+**-----------------------------------------------------------
+**
+** COPY (bytecount)
+** << source_address >>
+** << destination_address >>
+**
+** SCR_COPY sets the NO FLUSH option by default.
+** SCR_COPY_F does not set this option.
+**
+** For chips which do not support this option,
+** ncr_copy_and_bind() will remove this bit.
+**-----------------------------------------------------------
+*/
+
+#define SCR_NO_FLUSH 0x01000000
+
+#define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n))
+#define SCR_COPY_F(n) (0xc0000000 | (n))
+
+/*-----------------------------------------------------------
+**
+** Register move and binary operations
+**
+**-----------------------------------------------------------
+**
+** SFBR_REG (reg, op, data) reg = SFBR op data
+** << 0 >>
+**
+** REG_SFBR (reg, op, data) SFBR = reg op data
+** << 0 >>
+**
+** REG_REG (reg, op, data) reg = reg op data
+** << 0 >>
+**
+**-----------------------------------------------------------
+** On 810A, 860, 825A, 875, 895 and 896 chips the content
+** of SFBR register can be used as data (SCR_SFBR_DATA).
+** The 896 has additionnal IO registers starting at
+** offset 0x80. Bit 7 of register offset is stored in
+** bit 7 of the SCRIPTS instruction first DWORD.
+**-----------------------------------------------------------
+*/
+
+#define SCR_REG_OFS(ofs) ((((ofs) & 0x7f) << 16ul) + ((ofs) & 0x80))
+
+#define SCR_SFBR_REG(reg,op,data) \
+ (0x68000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+#define SCR_REG_SFBR(reg,op,data) \
+ (0x70000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+#define SCR_REG_REG(reg,op,data) \
+ (0x78000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+
+#define SCR_LOAD 0x00000000
+#define SCR_SHL 0x01000000
+#define SCR_OR 0x02000000
+#define SCR_XOR 0x03000000
+#define SCR_AND 0x04000000
+#define SCR_SHR 0x05000000
+#define SCR_ADD 0x06000000
+#define SCR_ADDC 0x07000000
+
+#define SCR_SFBR_DATA (0x00800000>>8ul) /* Use SFBR as data */
+
+/*-----------------------------------------------------------
+**
+** FROM_REG (reg) SFBR = reg
+** << 0 >>
+**
+** TO_REG (reg) reg = SFBR
+** << 0 >>
+**
+** LOAD_REG (reg, data) reg = <data>
+** << 0 >>
+**
+** LOAD_SFBR(data) SFBR = <data>
+** << 0 >>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_FROM_REG(reg) \
+ SCR_REG_SFBR(reg,SCR_OR,0)
+
+#define SCR_TO_REG(reg) \
+ SCR_SFBR_REG(reg,SCR_OR,0)
+
+#define SCR_LOAD_REG(reg,data) \
+ SCR_REG_REG(reg,SCR_LOAD,data)
+
+#define SCR_LOAD_SFBR(data) \
+ (SCR_REG_SFBR (gpreg, SCR_LOAD, data))
+
+/*-----------------------------------------------------------
+**
+** LOAD from memory to register.
+** STORE from register to memory.
+**
+** Only supported by 810A, 860, 825A, 875, 895 and 896.
+**
+**-----------------------------------------------------------
+**
+** LOAD_ABS (LEN)
+** <<start address>>
+**
+** LOAD_REL (LEN) (DSA relative)
+** <<dsa_offset>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_REG_OFS2(ofs) (((ofs) & 0xff) << 16ul)
+#define SCR_NO_FLUSH2 0x02000000
+#define SCR_DSA_REL2 0x10000000
+
+#define SCR_LOAD_R(reg, how, n) \
+ (0xe1000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
+
+#define SCR_STORE_R(reg, how, n) \
+ (0xe0000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
+
+#define SCR_LOAD_ABS(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2, n)
+#define SCR_LOAD_REL(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2, n)
+#define SCR_LOAD_ABS_F(reg, n) SCR_LOAD_R(reg, 0, n)
+#define SCR_LOAD_REL_F(reg, n) SCR_LOAD_R(reg, SCR_DSA_REL2, n)
+
+#define SCR_STORE_ABS(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2, n)
+#define SCR_STORE_REL(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2,n)
+#define SCR_STORE_ABS_F(reg, n) SCR_STORE_R(reg, 0, n)
+#define SCR_STORE_REL_F(reg, n) SCR_STORE_R(reg, SCR_DSA_REL2, n)
+
+
+/*-----------------------------------------------------------
+**
+** Waiting for Disconnect or Reselect
+**
+**-----------------------------------------------------------
+**
+** JUMP [ | IFTRUE/IFFALSE ( ... ) ]
+** <<address>>
+**
+** JUMPR [ | IFTRUE/IFFALSE ( ... ) ]
+** <<distance>>
+**
+** CALL [ | IFTRUE/IFFALSE ( ... ) ]
+** <<address>>
+**
+** CALLR [ | IFTRUE/IFFALSE ( ... ) ]
+** <<distance>>
+**
+** RETURN [ | IFTRUE/IFFALSE ( ... ) ]
+** <<dummy>>
+**
+** INT [ | IFTRUE/IFFALSE ( ... ) ]
+** <<ident>>
+**
+** INT_FLY [ | IFTRUE/IFFALSE ( ... ) ]
+** <<ident>>
+**
+** Conditions:
+** WHEN (phase)
+** IF (phase)
+** CARRYSET
+** DATA (data, mask)
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_NO_OP 0x80000000
+#define SCR_JUMP 0x80080000
+#define SCR_JUMP64 0x80480000
+#define SCR_JUMPR 0x80880000
+#define SCR_CALL 0x88080000
+#define SCR_CALLR 0x88880000
+#define SCR_RETURN 0x90080000
+#define SCR_INT 0x98080000
+#define SCR_INT_FLY 0x98180000
+
+#define IFFALSE(arg) (0x00080000 | (arg))
+#define IFTRUE(arg) (0x00000000 | (arg))
+
+#define WHEN(phase) (0x00030000 | (phase))
+#define IF(phase) (0x00020000 | (phase))
+
+#define DATA(D) (0x00040000 | ((D) & 0xff))
+#define MASK(D,M) (0x00040000 | (((M ^ 0xff) & 0xff) << 8ul)|((D) & 0xff))
+
+#define CARRYSET (0x00200000)
+
+/*-----------------------------------------------------------
+**
+** SCSI constants.
+**
+**-----------------------------------------------------------
+*/
+
+/*
+** Messages
+*/
+
+#define M_COMPLETE (0x00)
+#define M_EXTENDED (0x01)
+#define M_SAVE_DP (0x02)
+#define M_RESTORE_DP (0x03)
+#define M_DISCONNECT (0x04)
+#define M_ID_ERROR (0x05)
+#define M_ABORT (0x06)
+#define M_REJECT (0x07)
+#define M_NOOP (0x08)
+#define M_PARITY (0x09)
+#define M_LCOMPLETE (0x0a)
+#define M_FCOMPLETE (0x0b)
+#define M_RESET (0x0c)
+#define M_ABORT_TAG (0x0d)
+#define M_CLEAR_QUEUE (0x0e)
+#define M_INIT_REC (0x0f)
+#define M_REL_REC (0x10)
+#define M_TERMINATE (0x11)
+#define M_SIMPLE_TAG (0x20)
+#define M_HEAD_TAG (0x21)
+#define M_ORDERED_TAG (0x22)
+#define M_IGN_RESIDUE (0x23)
+#define M_IDENTIFY (0x80)
+
+#define M_X_MODIFY_DP (0x00)
+#define M_X_SYNC_REQ (0x01)
+#define M_X_WIDE_REQ (0x03)
+#define M_X_PPR_REQ (0x04)
+
+/*
+** Status
+*/
+
+#define S_GOOD (0x00)
+#define S_CHECK_COND (0x02)
+#define S_COND_MET (0x04)
+#define S_BUSY (0x08)
+#define S_INT (0x10)
+#define S_INT_COND_MET (0x14)
+#define S_CONFLICT (0x18)
+#define S_TERMINATED (0x20)
+#define S_QUEUE_FULL (0x28)
+#define S_ILLEGAL (0xff)
+#define S_SENSE (0x80)
+
+/*
+ * End of ncrreg from FreeBSD
+ */
+
+#endif /* !defined HOSTS_C */
+
+#endif /* defined SYM53C8XX_DEFS_H */
diff --git a/linux/src/drivers/scsi/ultrastor.c b/linux/src/drivers/scsi/ultrastor.c
index a1a1982d..de824726 100644
--- a/linux/src/drivers/scsi/ultrastor.c
+++ b/linux/src/drivers/scsi/ultrastor.c
@@ -333,7 +333,7 @@ static void log_ultrastor_abort(register struct ultrastor_config *config,
{
static char fmt[80] = "abort %d (%x); MSCP free pool: %x;";
register int i;
- int flags;
+ unsigned long flags;
save_flags(flags);
cli();
@@ -681,7 +681,7 @@ int ultrastor_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
int mscp_index;
#endif
unsigned int status;
- int flags;
+ unsigned long flags;
/* Next test is for debugging; "can't happen" */
if ((config.mscp_free & ((1U << ULTRASTOR_MAX_CMDS) - 1)) == 0)
@@ -853,7 +853,7 @@ int ultrastor_abort(Scsi_Cmnd *SCpnt)
{
int port0 = (config.slot << 12) | 0xc80;
int i;
- int flags;
+ unsigned long flags;
save_flags(flags);
cli();
strcpy(out, "OGM %d:%x ICM %d:%x ports: ");
@@ -879,7 +879,7 @@ int ultrastor_abort(Scsi_Cmnd *SCpnt)
if (config.slot ? inb(config.icm_address - 1) == 2 :
(inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
{
- int flags;
+ unsigned long flags;
save_flags(flags);
printk("Ux4F: abort while completed command pending\n");
restore_flags(flags);
@@ -901,7 +901,7 @@ int ultrastor_abort(Scsi_Cmnd *SCpnt)
and the interrupt handler will call done. */
if (config.slot && inb(config.ogm_address - 1) == 0)
{
- int flags;
+ unsigned long flags;
save_flags(flags);
cli();
@@ -953,7 +953,7 @@ int ultrastor_abort(Scsi_Cmnd *SCpnt)
int ultrastor_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
{
- int flags;
+ unsigned long flags;
register int i;
#if (ULTRASTOR_DEBUG & UD_RESET)
printk("US14F: reset: called\n");
diff --git a/linux/src/drivers/scsi/wd7000.c b/linux/src/drivers/scsi/wd7000.c
index d910e27b..08d3ac3c 100644
--- a/linux/src/drivers/scsi/wd7000.c
+++ b/linux/src/drivers/scsi/wd7000.c
@@ -573,7 +573,7 @@ static int mail_out (Adapter *host, Scb *scbptr)
break;
}
else
- ogmb = (++ogmb) % OGMB_CNT;
+ ogmb = (ogmb + 1) % OGMB_CNT;
}
restore_flags (flags);
diff --git a/linux/src/include/asm-i386/io.h b/linux/src/include/asm-i386/io.h
index 98e32ce6..f961f1d2 100644
--- a/linux/src/include/asm-i386/io.h
+++ b/linux/src/include/asm-i386/io.h
@@ -25,6 +25,8 @@
* Linus
*/
+#include <machine/vm_param.h>
+
#ifdef SLOW_IO_BY_JUMPING
#define __SLOW_DOWN_IO __asm__ __volatile__("jmp 1f\n1:\tjmp 1f\n1:")
#else
@@ -45,12 +47,12 @@
*/
extern inline unsigned long virt_to_phys(volatile void * address)
{
- return (unsigned long) address;
+ return (unsigned long) _kvtophys(address);
}
extern inline void * phys_to_virt(unsigned long address)
{
- return (void *) address;
+ return (void *) phystokv(address);
}
/*
diff --git a/linux/src/include/asm-i386/posix_types.h b/linux/src/include/asm-i386/posix_types.h
index 712ef70c..6a04605a 100644
--- a/linux/src/include/asm-i386/posix_types.h
+++ b/linux/src/include/asm-i386/posix_types.h
@@ -15,8 +15,8 @@ typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_clock_t;
diff --git a/linux/src/include/asm-i386/string-486.h b/linux/src/include/asm-i386/string-486.h
deleted file mode 100644
index 1a337ba7..00000000
--- a/linux/src/include/asm-i386/string-486.h
+++ /dev/null
@@ -1,702 +0,0 @@
-#ifndef _I386_STRING_I486_H_
-#define _I386_STRING_I486_H_
-
-/*
- * This string-include defines all string functions as inline
- * functions. Use gcc. It also assumes ds=es=data space, this should be
- * normal. Most of the string-functions are rather heavily hand-optimized,
- * see especially strtok,strstr,str[c]spn. They should work, but are not
- * very easy to understand. Everything is done entirely within the register
- * set, making the functions fast and clean.
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- * Revised and optimized for i486/pentium
- * 1994/03/15 by Alberto Vignani/Davide Parodi @crf.it
- *
- * Split into 2 CPU specific files by Alan Cox to keep #ifdef noise down.
- */
-
-#define __HAVE_ARCH_STRCPY
-extern inline char * strcpy(char * dest,const char *src)
-{
-register char *tmp= (char *)dest;
-register char dummy;
-__asm__ __volatile__(
- "\n1:\t"
- "movb (%0),%b2\n\t"
- "incl %0\n\t"
- "movb %b2,(%1)\n\t"
- "incl %1\n\t"
- "testb %b2,%b2\n\t"
- "jne 1b"
- :"=r" (src), "=r" (tmp), "=q" (dummy)
- :"0" (src), "1" (tmp)
- :"memory");
-return dest;
-}
-
-#define __HAVE_ARCH_STRNCPY
-extern inline char * strncpy(char * dest,const char *src,size_t count)
-{
-register char *tmp= (char *)dest;
-register char dummy;
-if (count) {
-__asm__ __volatile__(
- "\n1:\t"
- "movb (%0),%b2\n\t"
- "incl %0\n\t"
- "movb %b2,(%1)\n\t"
- "incl %1\n\t"
- "decl %3\n\t"
- "je 3f\n\t"
- "testb %b2,%b2\n\t"
- "jne 1b\n\t"
- "2:\tmovb %b2,(%1)\n\t"
- "incl %1\n\t"
- "decl %3\n\t"
- "jne 2b\n\t"
- "3:"
- :"=r" (src), "=r" (tmp), "=q" (dummy), "=r" (count)
- :"0" (src), "1" (tmp), "3" (count)
- :"memory");
- } /* if (count) */
-return dest;
-}
-
-#define __HAVE_ARCH_STRCAT
-extern inline char * strcat(char * dest,const char * src)
-{
-register char *tmp = (char *)(dest-1);
-register char dummy;
-__asm__ __volatile__(
- "\n1:\tincl %1\n\t"
- "cmpb $0,(%1)\n\t"
- "jne 1b\n"
- "2:\tmovb (%2),%b0\n\t"
- "incl %2\n\t"
- "movb %b0,(%1)\n\t"
- "incl %1\n\t"
- "testb %b0,%b0\n\t"
- "jne 2b\n"
- :"=q" (dummy), "=r" (tmp), "=r" (src)
- :"1" (tmp), "2" (src)
- :"memory");
-return dest;
-}
-
-#define __HAVE_ARCH_STRNCAT
-extern inline char * strncat(char * dest,const char * src,size_t count)
-{
-register char *tmp = (char *)(dest-1);
-register char dummy;
-__asm__ __volatile__(
- "\n1:\tincl %1\n\t"
- "cmpb $0,(%1)\n\t"
- "jne 1b\n"
- "2:\tdecl %3\n\t"
- "js 3f\n\t"
- "movb (%2),%b0\n\t"
- "incl %2\n\t"
- "movb %b0,(%1)\n\t"
- "incl %1\n\t"
- "testb %b0,%b0\n\t"
- "jne 2b\n"
- "3:\txorb %b0,%b0\n\t"
- "movb %b0,(%1)\n\t"
- :"=q" (dummy), "=r" (tmp), "=r" (src), "=r" (count)
- :"1" (tmp), "2" (src), "3" (count)
- :"memory");
-return dest;
-}
-
-#define __HAVE_ARCH_STRCMP
-extern inline int strcmp(const char * cs,const char * ct)
-{
-register int __res;
-__asm__ __volatile__(
- "\n1:\tmovb (%1),%b0\n\t"
- "incl %1\n\t"
- "cmpb %b0,(%2)\n\t"
- "jne 2f\n\t"
- "incl %2\n\t"
- "testb %b0,%b0\n\t"
- "jne 1b\n\t"
- "xorl %k0,%k0\n\t"
- "jmp 3f\n"
- "2:\tmovl $1,%k0\n\t"
- "jb 3f\n\t"
- "negl %k0\n"
- "3:"
- :"=q" (__res), "=r" (cs), "=r" (ct)
- :"1" (cs), "2" (ct)
- : "memory" );
-return __res;
-}
-
-#define __HAVE_ARCH_STRNCMP
-extern inline int strncmp(const char * cs,const char * ct,size_t count)
-{
-register int __res;
-__asm__ __volatile__(
- "\n1:\tdecl %3\n\t"
- "js 2f\n\t"
- "movb (%1),%b0\n\t"
- "incl %1\n\t"
- "cmpb %b0,(%2)\n\t"
- "jne 3f\n\t"
- "incl %2\n\t"
- "testb %b0,%b0\n\t"
- "jne 1b\n"
- "2:\txorl %k0,%k0\n\t"
- "jmp 4f\n"
- "3:\tmovl $1,%k0\n\t"
- "jb 4f\n\t"
- "negl %k0\n"
- "4:"
- :"=q" (__res), "=r" (cs), "=r" (ct), "=r" (count)
- :"1" (cs), "2" (ct), "3" (count));
-return __res;
-}
-
-#define __HAVE_ARCH_STRCHR
-extern inline char * strchr(const char * s, int c)
-{
-register char * __res;
-__asm__ __volatile__(
- "movb %%al,%%ah\n"
- "1:\tmovb (%1),%%al\n\t"
- "cmpb %%ah,%%al\n\t"
- "je 2f\n\t"
- "incl %1\n\t"
- "testb %%al,%%al\n\t"
- "jne 1b\n\t"
- "xorl %1,%1\n"
- "2:\tmovl %1,%0\n\t"
- :"=a" (__res), "=r" (s)
- :"0" (c), "1" (s));
-return __res;
-}
-
-#define __HAVE_ARCH_STRRCHR
-extern inline char * strrchr(const char * s, int c)
-{
-register char * __res;
-__asm__ __volatile__(
- "cld\n\t"
- "movb %%al,%%ah\n"
- "1:\tlodsb\n\t"
- "cmpb %%ah,%%al\n\t"
- "jne 2f\n\t"
- "leal -1(%%esi),%0\n"
- "2:\ttestb %%al,%%al\n\t"
- "jne 1b"
- :"=d" (__res):"0" (0),"S" (s),"a" (c):"ax","si");
-return __res;
-}
-
-#define __HAVE_ARCH_STRSPN
-extern inline size_t strspn(const char * cs, const char * ct)
-{
-register char * __res;
-__asm__ __volatile__(
- "cld\n\t"
- "movl %4,%%edi\n\t"
- "repne\n\t"
- "scasb\n\t"
- "notl %%ecx\n\t"
- "decl %%ecx\n\t"
- "movl %%ecx,%%edx\n"
- "1:\tlodsb\n\t"
- "testb %%al,%%al\n\t"
- "je 2f\n\t"
- "movl %4,%%edi\n\t"
- "movl %%edx,%%ecx\n\t"
- "repne\n\t"
- "scasb\n\t"
- "je 1b\n"
- "2:\tdecl %0"
- :"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
- :"ax","cx","dx","di");
-return __res-cs;
-}
-
-#define __HAVE_ARCH_STRCSPN
-extern inline size_t strcspn(const char * cs, const char * ct)
-{
-register char * __res;
-__asm__ __volatile__(
- "cld\n\t"
- "movl %4,%%edi\n\t"
- "repne\n\t"
- "scasb\n\t"
- "notl %%ecx\n\t"
- "decl %%ecx\n\t"
- "movl %%ecx,%%edx\n"
- "1:\tlodsb\n\t"
- "testb %%al,%%al\n\t"
- "je 2f\n\t"
- "movl %4,%%edi\n\t"
- "movl %%edx,%%ecx\n\t"
- "repne\n\t"
- "scasb\n\t"
- "jne 1b\n"
- "2:\tdecl %0"
- :"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
- :"ax","cx","dx","di");
-return __res-cs;
-}
-
-#define __HAVE_ARCH_STRPBRK
-extern inline char * strpbrk(const char * cs,const char * ct)
-{
-register char * __res;
-__asm__ __volatile__(
- "cld\n\t"
- "movl %4,%%edi\n\t"
- "repne\n\t"
- "scasb\n\t"
- "notl %%ecx\n\t"
- "decl %%ecx\n\t"
- "movl %%ecx,%%edx\n"
- "1:\tlodsb\n\t"
- "testb %%al,%%al\n\t"
- "je 2f\n\t"
- "movl %4,%%edi\n\t"
- "movl %%edx,%%ecx\n\t"
- "repne\n\t"
- "scasb\n\t"
- "jne 1b\n\t"
- "decl %0\n\t"
- "jmp 3f\n"
- "2:\txorl %0,%0\n"
- "3:"
- :"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
- :"ax","cx","dx","di");
-return __res;
-}
-
-#define __HAVE_ARCH_STRSTR
-extern inline char * strstr(const char * cs,const char * ct)
-{
-register char * __res;
-__asm__ __volatile__(
- "cld\n\t" \
- "movl %4,%%edi\n\t"
- "repne\n\t"
- "scasb\n\t"
- "notl %%ecx\n\t"
- "decl %%ecx\n\t" /* NOTE! This also sets Z if searchstring='' */
- "movl %%ecx,%%edx\n"
- "1:\tmovl %4,%%edi\n\t"
- "movl %%esi,%%eax\n\t"
- "movl %%edx,%%ecx\n\t"
- "repe\n\t"
- "cmpsb\n\t"
- "je 2f\n\t" /* also works for empty string, see above */
- "xchgl %%eax,%%esi\n\t"
- "incl %%esi\n\t"
- "cmpb $0,-1(%%eax)\n\t"
- "jne 1b\n\t"
- "xorl %%eax,%%eax\n\t"
- "2:"
- :"=a" (__res):"0" (0),"c" (0xffffffff),"S" (cs),"g" (ct)
- :"cx","dx","di","si");
-return __res;
-}
-
-#define __HAVE_ARCH_STRLEN
-extern inline size_t strlen(const char * s)
-{
-/*
- * slightly slower on a 486, but with better chances of
- * register allocation
- */
-register char dummy, *tmp= (char *)s;
-__asm__ __volatile__(
- "\n1:\t"
- "movb\t(%0),%1\n\t"
- "incl\t%0\n\t"
- "testb\t%1,%1\n\t"
- "jne\t1b"
- :"=r" (tmp),"=q" (dummy)
- :"0" (s)
- : "memory" );
-return (tmp-s-1);
-}
-
-/* Added by Gertjan van Wingerde to make minix and sysv module work */
-#define __HAVE_ARCH_STRNLEN
-extern inline size_t strnlen(const char * s, size_t count)
-{
-register int __res;
-__asm__ __volatile__(
- "movl %1,%0\n\t"
- "jmp 2f\n"
- "1:\tcmpb $0,(%0)\n\t"
- "je 3f\n\t"
- "incl %0\n"
- "2:\tdecl %2\n\t"
- "cmpl $-1,%2\n\t"
- "jne 1b\n"
- "3:\tsubl %1,%0"
- :"=a" (__res)
- :"c" (s),"d" (count)
- :"dx");
-return __res;
-}
-/* end of additional stuff */
-
-#define __HAVE_ARCH_STRTOK
-extern inline char * strtok(char * s,const char * ct)
-{
-register char * __res;
-__asm__ __volatile__(
- "testl %1,%1\n\t"
- "jne 1f\n\t"
- "testl %0,%0\n\t"
- "je 8f\n\t"
- "movl %0,%1\n"
- "1:\txorl %0,%0\n\t"
- "movl $-1,%%ecx\n\t"
- "xorl %%eax,%%eax\n\t"
- "cld\n\t"
- "movl %4,%%edi\n\t"
- "repne\n\t"
- "scasb\n\t"
- "notl %%ecx\n\t"
- "decl %%ecx\n\t"
- "je 7f\n\t" /* empty delimiter-string */
- "movl %%ecx,%%edx\n"
- "2:\tlodsb\n\t"
- "testb %%al,%%al\n\t"
- "je 7f\n\t"
- "movl %4,%%edi\n\t"
- "movl %%edx,%%ecx\n\t"
- "repne\n\t"
- "scasb\n\t"
- "je 2b\n\t"
- "decl %1\n\t"
- "cmpb $0,(%1)\n\t"
- "je 7f\n\t"
- "movl %1,%0\n"
- "3:\tlodsb\n\t"
- "testb %%al,%%al\n\t"
- "je 5f\n\t"
- "movl %4,%%edi\n\t"
- "movl %%edx,%%ecx\n\t"
- "repne\n\t"
- "scasb\n\t"
- "jne 3b\n\t"
- "decl %1\n\t"
- "cmpb $0,(%1)\n\t"
- "je 5f\n\t"
- "movb $0,(%1)\n\t"
- "incl %1\n\t"
- "jmp 6f\n"
- "5:\txorl %1,%1\n"
- "6:\tcmpb $0,(%0)\n\t"
- "jne 7f\n\t"
- "xorl %0,%0\n"
- "7:\ttestl %0,%0\n\t"
- "jne 8f\n\t"
- "movl %0,%1\n"
- "8:"
- :"=b" (__res),"=S" (___strtok)
- :"0" (___strtok),"1" (s),"g" (ct)
- :"ax","cx","dx","di","memory");
-return __res;
-}
-
-#define __memcpy_c(d,s,count) \
-((count%4==0) ? \
- __memcpy_by4((d),(s),(count)) : \
- ((count%2==0) ? \
- __memcpy_by2((d),(s),(count)) : \
- __memcpy_g((d),(s),(count))))
-
-#define __HAVE_ARCH_MEMCPY
-#define memcpy(d,s,count) \
-(count == 0 \
- ? d \
- : __builtin_constant_p(count) \
- ? __memcpy_c((d),(s),(count)) \
- : __memcpy_g((d),(s),(count)))
-
-/*
- * These ought to get tweaked to do some cache priming.
- */
-
-extern inline void * __memcpy_by4(void * to, const void * from, size_t n)
-{
-register void *tmp = (void *)to;
-register int dummy1,dummy2;
-__asm__ __volatile__ (
- "\n1:\tmovl (%2),%0\n\t"
- "addl $4,%2\n\t"
- "movl %0,(%1)\n\t"
- "addl $4,%1\n\t"
- "decl %3\n\t"
- "jnz 1b"
- :"=r" (dummy1), "=r" (tmp), "=r" (from), "=r" (dummy2)
- :"1" (tmp), "2" (from), "3" (n/4)
- :"memory");
-return to;
-}
-
-extern inline void * __memcpy_by2(void * to, const void * from, size_t n)
-{
-register void *tmp = (void *)to;
-register int dummy1,dummy2;
-__asm__ __volatile__ (
- "shrl $1,%3\n\t"
- "jz 2f\n" /* only a word */
- "1:\tmovl (%2),%0\n\t"
- "addl $4,%2\n\t"
- "movl %0,(%1)\n\t"
- "addl $4,%1\n\t"
- "decl %3\n\t"
- "jnz 1b\n"
- "2:\tmovw (%2),%w0\n\t"
- "movw %w0,(%1)"
- :"=r" (dummy1), "=r" (tmp), "=r" (from), "=r" (dummy2)
- :"1" (tmp), "2" (from), "3" (n/2)
- :"memory");
-return to;
-}
-
-extern inline void * __memcpy_g(void * to, const void * from, size_t n)
-{
-register void *tmp = (void *)to;
-__asm__ __volatile__ (
- "cld\n\t"
- "shrl $1,%%ecx\n\t"
- "jnc 1f\n\t"
- "movsb\n"
- "1:\tshrl $1,%%ecx\n\t"
- "jnc 2f\n\t"
- "movsw\n"
- "2:\trep\n\t"
- "movsl"
- : /* no output */
- :"c" (n),"D" ((long) tmp),"S" ((long) from)
- :"cx","di","si","memory");
-return to;
-}
-
-
-#define __HAVE_ARCH_MEMMOVE
-extern inline void * memmove(void * dest,const void * src, size_t n)
-{
-register void *tmp = (void *)dest;
-if (dest<src)
-__asm__ __volatile__ (
- "cld\n\t"
- "rep\n\t"
- "movsb"
- : /* no output */
- :"c" (n),"S" (src),"D" (tmp)
- :"cx","si","di");
-else
-__asm__ __volatile__ (
- "std\n\t"
- "rep\n\t"
- "movsb\n\t"
- "cld\n\t"
- : /* no output */
- :"c" (n), "S" (n-1+(const char *)src), "D" (n-1+(char *)tmp)
- :"cx","si","di","memory");
-return dest;
-}
-
-extern inline int memcmp(const void * cs,const void * ct,size_t count)
-{
-register int __res;
-__asm__ __volatile__(
- "cld\n\t"
- "repe\n\t"
- "cmpsb\n\t"
- "je 1f\n\t"
- "sbbl %0,%0\n\t"
- "orb $1,%b0\n"
- "1:"
- :"=abd" (__res):"0" (0),"S" (cs),"D" (ct),"c" (count)
- :"si","di","cx");
-return __res;
-}
-
-#define __HAVE_ARCH_MEMCHR
-extern inline void * memchr(const void * cs,int c,size_t count)
-{
-register void * __res;
-if (!count)
- return NULL;
-__asm__ __volatile__(
- "cld\n\t"
- "repne\n\t"
- "scasb\n\t"
- "je 1f\n\t"
- "movl $1,%0\n"
- "1:\tdecl %0"
- :"=D" (__res):"a" (c),"D" (cs),"c" (count)
- :"cx");
-return __res;
-}
-
-#define __memset_cc(s,c,count) \
-((count%4==0) ? \
- __memset_cc_by4((s),(c),(count)) : \
- ((count%2==0) ? \
- __memset_cc_by2((s),(c),(count)) : \
- __memset_cg((s),(c),(count))))
-
-#define __memset_gc(s,c,count) \
-((count%4==0) ? \
- __memset_gc_by4((s),(c),(count)) : \
- ((count%2==0) ? \
- __memset_gc_by2((s),(c),(count)) : \
- __memset_gg((s),(c),(count))))
-
-#define __HAVE_ARCH_MEMSET
-#define memset(s,c,count) \
-(count == 0 \
- ? s \
- : __builtin_constant_p(c) \
- ? __builtin_constant_p(count) \
- ? __memset_cc((s),(c),(count)) \
- : __memset_cg((s),(c),(count)) \
- : __builtin_constant_p(count) \
- ? __memset_gc((s),(c),(count)) \
- : __memset_gg((s),(c),(count)))
-
-extern inline void * __memset_cc_by4(void * s, char c, size_t count)
-{
-/*
- * register char *tmp = s;
- */
-register char *tmp = (char *)s;
-register int dummy;
-__asm__ __volatile__ (
- "\n1:\tmovl %2,(%0)\n\t"
- "addl $4,%0\n\t"
- "decl %1\n\t"
- "jnz 1b"
- :"=r" (tmp), "=r" (dummy)
- :"r" (0x01010101UL * (unsigned char) c), "0" (tmp), "1" (count/4)
- :"memory");
-return s;
-}
-
-extern inline void * __memset_cc_by2(void * s, char c, size_t count)
-{
-register void *tmp = (void *)s;
-register int dummy;
-__asm__ __volatile__ (
- "shrl $1,%1\n\t" /* may be divisible also by 4 */
- "jz 2f\n"
- "\n1:\tmovl %2,(%0)\n\t"
- "addl $4,%0\n\t"
- "decl %1\n\t"
- "jnz 1b\n"
- "2:\tmovw %w2,(%0)"
- :"=r" (tmp), "=r" (dummy)
- :"r" (0x01010101UL * (unsigned char) c), "0" (tmp), "1" (count/2)
- :"memory");
-return s;
-}
-
-extern inline void * __memset_gc_by4(void * s, char c, size_t count)
-{
-register void *tmp = (void *)s;
-register int dummy;
-__asm__ __volatile__ (
- "movb %b0,%h0\n"
- "pushw %w0\n\t"
- "shll $16,%k0\n\t"
- "popw %w0\n"
- "1:\tmovl %k0,(%1)\n\t"
- "addl $4,%1\n\t"
- "decl %2\n\t"
- "jnz 1b\n"
- :"=q" (c), "=r" (tmp), "=r" (dummy)
- :"0" ((unsigned) c), "1" (tmp), "2" (count/4)
- :"memory");
-return s;
-}
-
-extern inline void * __memset_gc_by2(void * s, char c, size_t count)
-{
-register void *tmp = (void *)s;
-register int dummy1,dummy2;
-__asm__ __volatile__ (
- "movb %b0,%h0\n\t"
- "shrl $1,%2\n\t" /* may be divisible also by 4 */
- "jz 2f\n\t"
- "pushw %w0\n\t"
- "shll $16,%k0\n\t"
- "popw %w0\n"
- "1:\tmovl %k0,(%1)\n\t"
- "addl $4,%1\n\t"
- "decl %2\n\t"
- "jnz 1b\n"
- "2:\tmovw %w0,(%1)"
- :"=q" (dummy1), "=r" (tmp), "=r" (dummy2)
- :"0" ((unsigned) c), "1" (tmp), "2" (count/2)
- :"memory");
-return s;
-}
-
-extern inline void * __memset_cg(void * s, char c, size_t count)
-{
-register void *tmp = (void *)s;
-__asm__ __volatile__ (
- "shrl $1,%%ecx\n\t"
- "cld\n\t"
- "rep\n\t"
- "stosw\n\t"
- "jnc 1f\n\t"
- "movb %%al,(%%edi)\n"
- "1:"
- : /* no output */
- :"c" (count),"D" (tmp), "a" (0x0101U * (unsigned char) c)
- :"cx","di","memory");
-return s;
-}
-
-extern inline void * __memset_gg(void * s,char c,size_t count)
-{
-register void *tmp = (void *)s;
-__asm__ __volatile__ (
- "movb %%al,%%ah\n\t"
- "shrl $1,%%ecx\n\t"
- "cld\n\t"
- "rep\n\t"
- "stosw\n\t"
- "jnc 1f\n\t"
- "movb %%al,(%%edi)\n"
- "1:"
- : /* no output */
- :"c" (count),"D" (tmp), "a" (c)
- :"cx","di","memory");
-return s;
-}
-
-
-/*
- * find the first occurrence of byte 'c', or 1 past the area if none
- */
-#define __HAVE_ARCH_MEMSCAN
-extern inline void * memscan(void * addr, int c, size_t size)
-{
- if (!size)
- return addr;
- __asm__("cld
- repnz; scasb
- jnz 1f
- dec %%edi
-1: "
- : "=D" (addr), "=c" (size)
- : "0" (addr), "1" (size), "a" (c));
- return addr;
-}
-
-#endif
diff --git a/linux/src/include/asm-i386/types.h b/linux/src/include/asm-i386/types.h
index 71309ac8..d792546f 100644
--- a/linux/src/include/asm-i386/types.h
+++ b/linux/src/include/asm-i386/types.h
@@ -39,6 +39,8 @@ typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;
+#define BITS_PER_LONG 32
+
#endif /* __KERNEL__ */
#endif
diff --git a/linux/src/include/linux/compatmac.h b/linux/src/include/linux/compatmac.h
index b9a41127..95370702 100644
--- a/linux/src/include/linux/compatmac.h
+++ b/linux/src/include/linux/compatmac.h
@@ -47,6 +47,7 @@
#define COMPATMAC_H
#include <linux/version.h>
+#include <asm/io.h>
#if LINUX_VERSION_CODE < 0x020100 /* Less than 2.1.0 */
#define TWO_ZERO
@@ -96,11 +97,11 @@ static inline unsigned char get_irq (unsigned char bus, unsigned char fn)
static inline void *ioremap(unsigned long base, long length)
{
- if (base < 0x100000) return (void *)base;
+ if (base < 0x100000) return phys_to_virt(base);
return vremap (base, length);
}
-#define my_iounmap(x, b) (((long)x<0x100000)?0:vfree ((void*)x))
+#define my_iounmap(x, b) (((long)x<(long)phys_to_virt(0x100000))?0:vfree ((void*)x))
#define capable(x) suser()
diff --git a/linux/src/include/linux/compiler-gcc.h b/linux/src/include/linux/compiler-gcc.h
new file mode 100644
index 00000000..59e4028e
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc.h
@@ -0,0 +1,106 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/*
+ * Common definitions for all gcc versions go here.
+ */
+
+
+/* Optimization barrier */
+/* The "volatile" is due to gcc bugs */
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+/*
+ * This macro obfuscates arithmetic on a variable address so that gcc
+ * shouldn't recognize the original var, and make assumptions about it.
+ *
+ * This is needed because the C standard makes it undefined to do
+ * pointer arithmetic on "objects" outside their boundaries and the
+ * gcc optimizers assume this is the case. In particular they
+ * assume such arithmetic does not wrap.
+ *
+ * A miscompilation has been observed because of this on PPC.
+ * To work around it we hide the relationship of the pointer and the object
+ * using this macro.
+ *
+ * Versions of the ppc64 compiler before 4.1 had a bug where use of
+ * RELOC_HIDE could trash r30. The bug can be worked around by changing
+ * the inline assembly constraint from =g to =r, in this particular
+ * case either is valid.
+ */
+#define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
+ (typeof(ptr)) (__ptr + (off)); })
+
+#ifdef __CHECKER__
+#define __must_be_array(arr) 0
+#else
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+#endif
+
+/*
+ * Force always-inline if the user requests it so via the .config,
+ * or if gcc is too old:
+ */
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+ !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
+# define inline inline __attribute__((always_inline))
+# define __inline__ __inline__ __attribute__((always_inline))
+# define __inline __inline __attribute__((always_inline))
+#endif
+
+#define __deprecated __attribute__((deprecated))
+#define __packed __attribute__((packed))
+#define __weak __attribute__((weak))
+
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked) to trace
+ * naked functions because then mcount is called without stack and frame pointer
+ * being set up and there is no chance to restore the lr register to the value
+ * before mcount was called.
+ *
+ * The asm() bodies of naked functions often depend on standard calling conventions,
+ * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce
+ * this, so we must do so ourselves. See GCC PR44290.
+ */
+#define __naked __attribute__((naked)) noinline __noclone notrace
+
+#define __noreturn __attribute__((noreturn))
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions have no effects except the return value and their
+ * return value depends only on the parameters and/or global
+ * variables. Such a function can be subject to common subexpression
+ * elimination and loop optimization just as an arithmetic operator
+ * would be.
+ * [...]
+ */
+#define __pure __attribute__((pure))
+#define __aligned(x) __attribute__((aligned(x)))
+#define __printf(a,b) __attribute__((format(printf,a,b)))
+#define noinline __attribute__((noinline))
+#define __attribute_const__ __attribute__((__const__))
+#define __maybe_unused __attribute__((unused))
+#define __always_unused __attribute__((unused))
+
+#define __gcc_header(x) #x
+#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
+#define gcc_header(x) _gcc_header(x)
+#include gcc_header(__GNUC__)
+
+#if !defined(__noclone)
+#define __noclone /* not needed */
+#endif
+
+/*
+ * A trick to suppress uninitialized variable warning without generating any
+ * code
+ */
+#define uninitialized_var(x) x = x
+
+#define __always_inline inline __attribute__((always_inline))
diff --git a/linux/src/include/linux/compiler-gcc3.h b/linux/src/include/linux/compiler-gcc3.h
new file mode 100644
index 00000000..37d41243
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc3.h
@@ -0,0 +1,23 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#if __GNUC_MINOR__ < 2
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
+#if __GNUC_MINOR__ >= 3
+# define __used __attribute__((__used__))
+#else
+# define __used __attribute__((__unused__))
+#endif
+
+#if __GNUC_MINOR__ >= 4
+#define __must_check __attribute__((warn_unused_result))
+#endif
+
+#ifdef CONFIG_GCOV_KERNEL
+# if __GNUC_MINOR__ < 4
+# error "GCOV profiling support for gcc versions below 3.4 not included"
+# endif /* __GNUC_MINOR__ */
+#endif /* CONFIG_GCOV_KERNEL */
diff --git a/linux/src/include/linux/compiler-gcc4.h b/linux/src/include/linux/compiler-gcc4.h
new file mode 100644
index 00000000..dfadc96e
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc4.h
@@ -0,0 +1,57 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/* GCC 4.1.[01] miscompiles __weak */
+#ifdef __KERNEL__
+# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1
+# error Your version of gcc miscompiles the __weak directive
+# endif
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
+
+#if __GNUC_MINOR__ >= 3
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+
+#if __GNUC_MINOR__ >= 5
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+#endif
+#endif
+
+#if __GNUC_MINOR__ > 0
+#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#endif
+#if __GNUC_MINOR__ >= 4 && !defined(__CHECKER__)
+#define __compiletime_warning(message) __attribute__((warning(message)))
+#define __compiletime_error(message) __attribute__((error(message)))
+#endif
diff --git a/linux/src/include/linux/compiler.h b/linux/src/include/linux/compiler.h
new file mode 100644
index 00000000..320d6c94
--- /dev/null
+++ b/linux/src/include/linux/compiler.h
@@ -0,0 +1,311 @@
+#ifndef __LINUX_COMPILER_H
+#define __LINUX_COMPILER_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef __CHECKER__
+# define __user __attribute__((noderef, address_space(1)))
+# define __kernel __attribute__((address_space(0)))
+# define __safe __attribute__((safe))
+# define __force __attribute__((force))
+# define __nocast __attribute__((nocast))
+# define __iomem __attribute__((noderef, address_space(2)))
+# define __acquires(x) __attribute__((context(x,0,1)))
+# define __releases(x) __attribute__((context(x,1,0)))
+# define __acquire(x) __context__(x,1)
+# define __release(x) __context__(x,-1)
+# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu __attribute__((noderef, address_space(3)))
+#ifdef CONFIG_SPARSE_RCU_POINTER
+# define __rcu __attribute__((noderef, address_space(4)))
+#else
+# define __rcu
+#endif
+extern void __chk_user_ptr(const volatile void __user *);
+extern void __chk_io_ptr(const volatile void __iomem *);
+#else
+# define __user
+# define __kernel
+# define __safe
+# define __force
+# define __nocast
+# define __iomem
+# define __chk_user_ptr(x) (void)0
+# define __chk_io_ptr(x) (void)0
+# define __builtin_warning(x, y...) (1)
+# define __acquires(x)
+# define __releases(x)
+# define __acquire(x) (void)0
+# define __release(x) (void)0
+# define __cond_lock(x,c) (c)
+# define __percpu
+# define __rcu
+#endif
+
+#ifdef __KERNEL__
+
+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
+#endif
+
+#define notrace __attribute__((no_instrument_function))
+
+/* Intel compiler defines __GNUC__. So we will overwrite implementations
+ * coming from above header files here
+ */
+#ifdef __INTEL_COMPILER
+# include <linux/compiler-intel.h>
+#endif
+
+/*
+ * Generic compiler-dependent macros required for kernel
+ * build go below this comment. Actual compiler/compiler version
+ * specific implementations come from the above header files
+ */
+
+struct ftrace_branch_data {
+ const char *func;
+ const char *file;
+ unsigned line;
+ union {
+ struct {
+ unsigned long correct;
+ unsigned long incorrect;
+ };
+ struct {
+ unsigned long miss;
+ unsigned long hit;
+ };
+ unsigned long miss_hit[2];
+ };
+};
+
+/*
+ * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
+ * to disable branch tracing on a per file basis.
+ */
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+
+#define likely_notrace(x) __builtin_expect(!!(x), 1)
+#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
+
+#define __branch_check__(x, expect) ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_annotated_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = likely_notrace(x); \
+ ftrace_likely_update(&______f, ______r, expect); \
+ ______r; \
+ })
+
+/*
+ * Using __builtin_constant_p(x) to ignore cases where the return
+ * value is always the same. This idea is taken from a similar patch
+ * written by Daniel Walker.
+ */
+# ifndef likely
+# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+# endif
+# ifndef unlikely
+# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
+# endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+/*
+ * "Define 'is'", Bill Clinton
+ * "Define 'if'", Steven Rostedt
+ */
+#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
+#define __trace_if(cond) \
+ if (__builtin_constant_p((cond)) ? !!(cond) : \
+ ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = !!(cond); \
+ ______f.miss_hit[______r]++; \
+ ______r; \
+ }))
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+
+#else
+# define likely(x) __builtin_expect(!!(x), 1)
+# define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+
+/* Optimization barrier */
+#ifndef barrier
+# define barrier() __memory_barrier()
+#endif
+
+/* Unreachable code */
+#ifndef unreachable
+# define unreachable() do { } while (1)
+#endif
+
+#ifndef RELOC_HIDE
+# define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __ptr = (unsigned long) (ptr); \
+ (typeof(ptr)) (__ptr + (off)); })
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+/*
+ * Allow us to mark functions as 'deprecated' and have gcc emit a nice
+ * warning for each use, in hopes of speeding the functions removal.
+ * Usage is:
+ * int __deprecated foo(void)
+ */
+#ifndef __deprecated
+# define __deprecated /* unimplemented */
+#endif
+
+#ifdef MODULE
+#define __deprecated_for_modules __deprecated
+#else
+#define __deprecated_for_modules
+#endif
+
+#ifndef __must_check
+#define __must_check
+#endif
+
+#ifndef CONFIG_ENABLE_MUST_CHECK
+#undef __must_check
+#define __must_check
+#endif
+#ifndef CONFIG_ENABLE_WARN_DEPRECATED
+#undef __deprecated
+#undef __deprecated_for_modules
+#define __deprecated
+#define __deprecated_for_modules
+#endif
+
+/*
+ * Allow us to avoid 'defined but not used' warnings on functions and data,
+ * as well as force them to be emitted to the assembly file.
+ *
+ * As of gcc 3.4, static functions that are not marked with attribute((used))
+ * may be elided from the assembly file. As of gcc 3.4, static data not so
+ * marked will not be elided, but this may change in a future gcc version.
+ *
+ * NOTE: Because distributions shipped with a backported unit-at-a-time
+ * compiler in gcc 3.3, we must define __used to be __attribute__((used))
+ * for gcc >=3.3 instead of 3.4.
+ *
+ * In prior versions of gcc, such functions and data would be emitted, but
+ * would be warned about except with attribute((unused)).
+ *
+ * Mark functions that are referenced only in inline assembly as __used so
+ * the code is emitted even though it appears to be unreferenced.
+ */
+#ifndef __used
+# define __used /* unimplemented */
+#endif
+
+#ifndef __maybe_unused
+# define __maybe_unused /* unimplemented */
+#endif
+
+#ifndef __always_unused
+# define __always_unused /* unimplemented */
+#endif
+
+#ifndef noinline
+#define noinline
+#endif
+
+/*
+ * Rather then using noinline to prevent stack consumption, use
+ * noinline_for_stack instead. For documentaiton reasons.
+ */
+#define noinline_for_stack noinline
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
+
+#endif /* __KERNEL__ */
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions do not examine any values except their arguments,
+ * and have no effects except the return value. Basically this is
+ * just slightly more strict class than the `pure' attribute above,
+ * since function is not allowed to read global memory.
+ *
+ * Note that a function that has pointer arguments and examines the
+ * data pointed to must _not_ be declared `const'. Likewise, a
+ * function that calls a non-`const' function usually must not be
+ * `const'. It does not make sense for a `const' function to return
+ * `void'.
+ */
+#ifndef __attribute_const__
+# define __attribute_const__ /* unimplemented */
+#endif
+
+/*
+ * Tell gcc if a function is cold. The compiler will assume any path
+ * directly leading to the call is unlikely.
+ */
+
+#ifndef __cold
+#define __cold
+#endif
+
+/* Simple shorthand for a section definition */
+#ifndef __section
+# define __section(S) __attribute__ ((__section__(#S)))
+#endif
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
+/* Compile time object size, -1 for unknown */
+#ifndef __compiletime_object_size
+# define __compiletime_object_size(obj) -1
+#endif
+#ifndef __compiletime_warning
+# define __compiletime_warning(message)
+#endif
+#ifndef __compiletime_error
+# define __compiletime_error(message)
+#endif
+
+/*
+ * Prevent the compiler from merging or refetching accesses. The compiler
+ * is also forbidden from reordering successive instances of ACCESS_ONCE(),
+ * but only when the compiler is aware of some particular ordering. One way
+ * to make the compiler aware of ordering is to put the two invocations of
+ * ACCESS_ONCE() in different C statements.
+ *
+ * This macro does absolutely -nothing- to prevent the CPU from reordering,
+ * merging, or refetching absolutely anything at any time. Its main intended
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
+ */
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+
+#endif /* __LINUX_COMPILER_H */
diff --git a/linux/src/include/linux/ctype.h b/linux/src/include/linux/ctype.h
index 838ef933..8acfe312 100644
--- a/linux/src/include/linux/ctype.h
+++ b/linux/src/include/linux/ctype.h
@@ -1,6 +1,11 @@
#ifndef _LINUX_CTYPE_H
#define _LINUX_CTYPE_H
+/*
+ * NOTE! This ctype does not handle EOF like the standard C
+ * library is required to.
+ */
+
#define _U 0x01 /* upper */
#define _L 0x02 /* lower */
#define _D 0x04 /* digit */
@@ -10,25 +15,50 @@
#define _X 0x40 /* hex digit */
#define _SP 0x80 /* hard space (0x20) */
-extern unsigned char _ctype[];
-extern char _ctmp;
-
-#define isalnum(c) ((_ctype+1)[c]&(_U|_L|_D))
-#define isalpha(c) ((_ctype+1)[c]&(_U|_L))
-#define iscntrl(c) ((_ctype+1)[c]&(_C))
-#define isdigit(c) ((_ctype+1)[c]&(_D))
-#define isgraph(c) ((_ctype+1)[c]&(_P|_U|_L|_D))
-#define islower(c) ((_ctype+1)[c]&(_L))
-#define isprint(c) ((_ctype+1)[c]&(_P|_U|_L|_D|_SP))
-#define ispunct(c) ((_ctype+1)[c]&(_P))
-#define isspace(c) ((_ctype+1)[c]&(_S))
-#define isupper(c) ((_ctype+1)[c]&(_U))
-#define isxdigit(c) ((_ctype+1)[c]&(_D|_X))
-
-#define isascii(c) (((unsigned) c)<=0x7f)
-#define toascii(c) (((unsigned) c)&0x7f)
-
-#define tolower(c) (_ctmp=c,isupper(_ctmp)?_ctmp-('A'-'a'):_ctmp)
-#define toupper(c) (_ctmp=c,islower(_ctmp)?_ctmp-('a'-'A'):_ctmp)
+extern const unsigned char _ctype[];
+
+#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
+
+#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
+#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
+#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
+#define isdigit(c) ((__ismask(c)&(_D)) != 0)
+#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
+#define islower(c) ((__ismask(c)&(_L)) != 0)
+#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
+#define ispunct(c) ((__ismask(c)&(_P)) != 0)
+/* Note: isspace() must return false for %NUL-terminator */
+#define isspace(c) ((__ismask(c)&(_S)) != 0)
+#define isupper(c) ((__ismask(c)&(_U)) != 0)
+#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
+
+#define isascii(c) (((unsigned char)(c))<=0x7f)
+#define toascii(c) (((unsigned char)(c))&0x7f)
+
+static inline unsigned char __tolower(unsigned char c)
+{
+ if (isupper(c))
+ c -= 'A'-'a';
+ return c;
+}
+
+static inline unsigned char __toupper(unsigned char c)
+{
+ if (islower(c))
+ c -= 'a'-'A';
+ return c;
+}
+
+#define tolower(c) __tolower(c)
+#define toupper(c) __toupper(c)
+
+/*
+ * Fast implementation of tolower() for internal usage. Do not use in your
+ * code.
+ */
+static inline char _tolower(const char c)
+{
+ return c | 0x20;
+}
#endif
diff --git a/linux/src/include/linux/hdreg.h b/linux/src/include/linux/hdreg.h
index 2645deea..e223480d 100644
--- a/linux/src/include/linux/hdreg.h
+++ b/linux/src/include/linux/hdreg.h
@@ -92,11 +92,12 @@ struct hd_geometry {
#define HDIO_GETGEO 0x0301 /* get device geometry */
#define HDIO_GET_UNMASKINTR 0x0302 /* get current unmask setting */
#define HDIO_GET_MULTCOUNT 0x0304 /* get current IDE blockmode setting */
-#define HDIO_GET_IDENTITY 0x0307 /* get IDE identification info */
+#define HDIO_OBSOLETE_IDENTITY 0x0307 /* OBSOLETE, DO NOT USE: returns 142 bytes */
#define HDIO_GET_KEEPSETTINGS 0x0308 /* get keep-settings-on-reset flag */
#define HDIO_GET_32BIT 0x0309 /* get current io_32bit setting */
#define HDIO_GET_NOWERR 0x030a /* get ignore-write-error flag */
#define HDIO_GET_DMA 0x030b /* get use-dma flag */
+#define HDIO_GET_IDENTITY 0x030d /* get IDE identification info */
#define HDIO_DRIVE_CMD 0x031f /* execute a special drive command */
/* hd/ide ctl's that pass (arg) non-ptr values are numbered 0x032n/0x033n */
@@ -166,14 +167,54 @@ struct hd_driveid {
unsigned short word79;
unsigned short word80;
unsigned short word81;
- unsigned short word82;
- unsigned short word83;
+ unsigned short command_sets; /* bits 0:Smart 1:Security 2:Removable 3:PM */
+ unsigned short word83; /* bits 14:Smart Enabled 13:0 zero */
unsigned short word84;
unsigned short word85;
unsigned short word86;
unsigned short word87;
unsigned short dma_ultra;
- unsigned short reserved[167];
+ unsigned short word89; /* reserved (word 89) */
+ unsigned short word90; /* reserved (word 90) */
+ unsigned short word91; /* reserved (word 91) */
+ unsigned short word92; /* reserved (word 92) */
+ unsigned short word93; /* reserved (word 93) */
+ unsigned short word94; /* reserved (word 94) */
+ unsigned short word95; /* reserved (word 95) */
+ unsigned short word96; /* reserved (word 96) */
+ unsigned short word97; /* reserved (word 97) */
+ unsigned short word98; /* reserved (word 98) */
+ unsigned short word99; /* reserved (word 99) */
+ unsigned short word100; /* reserved (word 100) */
+ unsigned short word101; /* reserved (word 101) */
+ unsigned short word102; /* reserved (word 102) */
+ unsigned short word103; /* reserved (word 103) */
+ unsigned short word104; /* reserved (word 104) */
+ unsigned short word105; /* reserved (word 105) */
+ unsigned short word106; /* reserved (word 106) */
+ unsigned short word107; /* reserved (word 107) */
+ unsigned short word108; /* reserved (word 108) */
+ unsigned short word109; /* reserved (word 109) */
+ unsigned short word110; /* reserved (word 110) */
+ unsigned short word111; /* reserved (word 111) */
+ unsigned short word112; /* reserved (word 112) */
+ unsigned short word113; /* reserved (word 113) */
+ unsigned short word114; /* reserved (word 114) */
+ unsigned short word115; /* reserved (word 115) */
+ unsigned short word116; /* reserved (word 116) */
+ unsigned short word117; /* reserved (word 117) */
+ unsigned short word118; /* reserved (word 118) */
+ unsigned short word119; /* reserved (word 119) */
+ unsigned short word120; /* reserved (word 120) */
+ unsigned short word121; /* reserved (word 121) */
+ unsigned short word122; /* reserved (word 122) */
+ unsigned short word123; /* reserved (word 123) */
+ unsigned short word124; /* reserved (word 124) */
+ unsigned short word125; /* reserved (word 125) */
+ unsigned short word126; /* reserved (word 126) */
+ unsigned short word127; /* reserved (word 127) */
+ unsigned short security; /* bits 0:suuport 1:enabled 2:locked 3:frozen */
+ unsigned short reserved[127];
};
#ifdef __KERNEL__
diff --git a/linux/pcmcia-cs/include/linux/init.h b/linux/src/include/linux/init.h
index 06da72d0..d4798b25 100644
--- a/linux/pcmcia-cs/include/linux/init.h
+++ b/linux/src/include/linux/init.h
@@ -1,9 +1,17 @@
#ifndef _COMPAT_INIT_H
#define _COMPAT_INIT_H
+#include <linux/compiler.h>
+
+#ifdef MODULE
+#define __exitused
+#else
+#define __exitused __used
+#endif
+
#define __init
#define __initdata
-#define __exit
+#define __exit __exitused __cold notrace
#define __exitdata
#define __devinit
#define __devinitdata
diff --git a/linux/src/include/linux/interrupt.h b/linux/src/include/linux/interrupt.h
index 8654a4f2..5765260d 100644
--- a/linux/src/include/linux/interrupt.h
+++ b/linux/src/include/linux/interrupt.h
@@ -14,11 +14,11 @@ struct irqaction {
struct irqaction *next;
};
-extern unsigned long intr_count;
+extern unsigned int intr_count;
extern int bh_mask_count[32];
-extern unsigned long bh_active;
-extern unsigned long bh_mask;
+extern unsigned int bh_active;
+extern unsigned int bh_mask;
extern void (*bh_base[32])(void);
asmlinkage void do_bottom_half(void);
diff --git a/linux/src/include/linux/kcomp.h b/linux/src/include/linux/kcomp.h
index 1f7344a6..5e06d7e7 100644
--- a/linux/src/include/linux/kcomp.h
+++ b/linux/src/include/linux/kcomp.h
@@ -7,8 +7,6 @@
#include <linux/netdevice.h>
#include <linux/pagemap.h>
-#define __exit
-
#define pci_enable_device(x) 0
#define page_address(x) (x | PAGE_OFFSET)
diff --git a/linux/src/include/linux/module.h b/linux/src/include/linux/module.h
index bc364f79..acc25403 100644
--- a/linux/src/include/linux/module.h
+++ b/linux/src/include/linux/module.h
@@ -101,9 +101,12 @@ int Using_Versions; /* gcc will handle this global (used as a flag) correctly */
#else
+#define EXPORT_SYMBOL(sym)
+
#define MOD_INC_USE_COUNT do { } while (0)
#define MOD_DEC_USE_COUNT do { } while (0)
#define MOD_IN_USE 1
+#define SET_MODULE_OWNER(dev) do{ } while(0)
#endif
diff --git a/linux/src/include/linux/stddef.h b/linux/src/include/linux/stddef.h
index c6221e71..488d49c0 100644
--- a/linux/src/include/linux/stddef.h
+++ b/linux/src/include/linux/stddef.h
@@ -3,7 +3,7 @@
#ifndef _SIZE_T
#define _SIZE_T
-typedef unsigned int size_t;
+typedef unsigned long size_t;
#endif
#undef NULL
diff --git a/linux/src/init/main.c b/linux/src/init/main.c
index d41ec60c..1aa15b97 100644
--- a/linux/src/init/main.c
+++ b/linux/src/init/main.c
@@ -42,6 +42,8 @@
#include <asm/bugs.h>
+#include <linux/dev/glue/glue.h>
+
/*
* Versions of gcc older than that listed below may actually compile
* and link okay, but the end product can have subtle run time bugs.
@@ -64,7 +66,6 @@ extern int bdflush(void *);
extern int kswapd(void *);
extern void kswapd_setup(void);
-extern void init_IRQ(void);
extern void init_modules(void);
extern long console_init(long, long);
extern long kmalloc_init(long,long);
diff --git a/linux/src/kernel/softirq.c b/linux/src/kernel/softirq.c
index 022b5535..32038b15 100644
--- a/linux/src/kernel/softirq.c
+++ b/linux/src/kernel/softirq.c
@@ -21,18 +21,18 @@
#include <asm/irq.h>
#include <asm/bitops.h>
-unsigned long intr_count = 0;
+unsigned int intr_count = 0;
int bh_mask_count[32];
-unsigned long bh_active = 0;
-unsigned long bh_mask = 0;
+unsigned int bh_active = 0;
+unsigned int bh_mask = 0;
void (*bh_base[32])(void);
asmlinkage void do_bottom_half(void)
{
- unsigned long active;
- unsigned long mask, left;
+ unsigned int active;
+ unsigned int mask, left;
void (**bh)(void);
sti();
diff --git a/linux/src/lib/ctype.c b/linux/src/lib/ctype.c
index c5be2641..26baa620 100644
--- a/linux/src/lib/ctype.c
+++ b/linux/src/lib/ctype.c
@@ -5,32 +5,32 @@
*/
#include <linux/ctype.h>
+#include <linux/module.h>
-char _ctmp;
-unsigned char _ctype[] = {0x00, /* EOF */
-_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
-_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
-_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
-_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
-_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
-_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
-_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
-_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
-_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
-_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
-_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
-_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
-_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
-_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
-_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
-_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
-_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
-_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
-_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
-_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
-_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
-_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
-
+const unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
+EXPORT_SYMBOL(_ctype);
diff --git a/tests/.gitignore b/tests/.gitignore
new file mode 100644
index 00000000..6e86af1f
--- /dev/null
+++ b/tests/.gitignore
@@ -0,0 +1 @@
+/test-mbchk
diff --git a/vm/memory_object.c b/vm/memory_object.c
index 57dde76b..e281c6a3 100644
--- a/vm/memory_object.c
+++ b/vm/memory_object.c
@@ -891,34 +891,6 @@ MACRO_END
return (KERN_SUCCESS);
}
-/*
- * Old version of memory_object_lock_request.
- */
-kern_return_t
-xxx_memory_object_lock_request(object, offset, size,
- should_clean, should_flush, prot,
- reply_to, reply_to_type)
- register vm_object_t object;
- register vm_offset_t offset;
- register vm_size_t size;
- boolean_t should_clean;
- boolean_t should_flush;
- vm_prot_t prot;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
-{
- register int should_return;
-
- if (should_clean)
- should_return = MEMORY_OBJECT_RETURN_DIRTY;
- else
- should_return = MEMORY_OBJECT_RETURN_NONE;
-
- return(memory_object_lock_request(object,offset,size,
- should_return, should_flush, prot,
- reply_to, reply_to_type));
-}
-
kern_return_t
memory_object_set_attributes_common(object, object_ready, may_cache,
copy_strategy, use_old_pageout)
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index 4d81a687..4fed312e 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -40,13 +40,15 @@
#include <mach/kern_return.h>
#include <mach/notify.h>
#include <mach/vm_prot.h>
-#include <kern/zalloc.h>
-#include <kern/mach_param.h>
+#include <kern/printf.h>
+#include <kern/slab.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
-/* The zone which holds our proxy memory objects. */
-static zone_t memory_object_proxy_zone;
+#include <vm/memory_object_proxy.h>
+
+/* The cache which holds our proxy memory objects. */
+static struct kmem_cache memory_object_proxy_cache;
struct memory_object_proxy
{
@@ -61,13 +63,8 @@ typedef struct memory_object_proxy *memory_object_proxy_t;
void
memory_object_proxy_init (void)
{
- /* For limit, see PORT_MAX. */
- memory_object_proxy_zone = zinit (sizeof (struct memory_object_proxy), 0,
- (TASK_MAX * 3 + THREAD_MAX)
- * sizeof (struct memory_object_proxy),
- 256 * sizeof (struct memory_object_proxy),
- ZONE_EXHAUSTIBLE,
- "proxy memory object zone");
+ kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy",
+ sizeof (struct memory_object_proxy), 0, NULL, NULL, NULL, 0);
}
/* Lookup a proxy memory object by its port. */
@@ -125,7 +122,6 @@ memory_object_create_proxy (ipc_space_t space, vm_prot_t max_protection,
vm_offset_t *len, natural_t len_count,
ipc_port_t *port)
{
- kern_return_t kr;
memory_object_proxy_t proxy;
ipc_port_t notify;
@@ -151,13 +147,13 @@ memory_object_create_proxy (ipc_space_t space, vm_prot_t max_protection,
if (start[0] != 0 || len[0] != (vm_offset_t) ~0)
return KERN_INVALID_ARGUMENT;
- proxy = (memory_object_proxy_t) zalloc (memory_object_proxy_zone);
+ proxy = (memory_object_proxy_t) kmem_cache_alloc (&memory_object_proxy_cache);
/* Allocate port, keeping a reference for it. */
proxy->port = ipc_port_alloc_kernel ();
if (proxy->port == IP_NULL)
{
- zfree (memory_object_proxy_zone, (vm_offset_t) proxy);
+ kmem_cache_free (&memory_object_proxy_cache, (vm_offset_t) proxy);
return KERN_RESOURCE_SHORTAGE;
}
/* Associate the port with the proxy memory object. */
diff --git a/vm/memory_object_proxy.h b/vm/memory_object_proxy.h
new file mode 100644
index 00000000..f4be0d0d
--- /dev/null
+++ b/vm/memory_object_proxy.h
@@ -0,0 +1,48 @@
+/* memory_object_proxy.h - Proxy memory objects for Mach.
+ Copyright (C) 2005, 2011 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#ifndef _VM_MEMORY_OBJECT_PROXY_H_
+#define _VM_MEMORY_OBJECT_PROXY_H_
+
+#include <ipc/ipc_types.h>
+#include <mach/boolean.h>
+#include <mach/machine/kern_return.h>
+#include <mach/machine/vm_types.h>
+#include <mach/message.h>
+#include <mach/vm_prot.h>
+
+extern void memory_object_proxy_init (void);
+extern boolean_t memory_object_proxy_notify (mach_msg_header_t *msg);
+extern kern_return_t memory_object_create_proxy (ipc_space_t space,
+ vm_prot_t max_protection,
+ ipc_port_t *object,
+ natural_t object_count,
+ vm_offset_t *offset,
+ natural_t offset_count,
+ vm_offset_t *start,
+ natural_t start_count,
+ vm_offset_t *len,
+ natural_t len_count,
+ ipc_port_t *port);
+extern kern_return_t memory_object_proxy_lookup (ipc_port_t port,
+ ipc_port_t *object,
+ vm_prot_t *max_protection);
+
+#endif /* _VM_MEMORY_OBJECT_PROXY_H_ */
diff --git a/vm/vm_external.c b/vm/vm_external.c
index ac47faa3..e9643ffc 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -31,7 +31,7 @@
*/
#include <mach/boolean.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <vm/vm_external.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
@@ -40,7 +40,7 @@
boolean_t vm_external_unsafe = FALSE;
-zone_t vm_external_zone = ZONE_NULL;
+struct kmem_cache vm_external_cache;
/*
* The implementation uses bit arrays to record whether
@@ -52,8 +52,8 @@ zone_t vm_external_zone = ZONE_NULL;
#define SMALL_SIZE (VM_EXTERNAL_SMALL_SIZE/8)
#define LARGE_SIZE (VM_EXTERNAL_LARGE_SIZE/8)
-zone_t vm_object_small_existence_map_zone;
-zone_t vm_object_large_existence_map_zone;
+struct kmem_cache vm_object_small_existence_map_cache;
+struct kmem_cache vm_object_large_existence_map_cache;
vm_external_t vm_external_create(size)
@@ -62,20 +62,17 @@ vm_external_t vm_external_create(size)
vm_external_t result;
vm_size_t bytes;
- if (vm_external_zone == ZONE_NULL)
- return(VM_EXTERNAL_NULL);
-
- result = (vm_external_t) zalloc(vm_external_zone);
+ result = (vm_external_t) kmem_cache_alloc(&vm_external_cache);
result->existence_map = (char *) 0;
bytes = (atop(size) + 07) >> 3;
if (bytes <= SMALL_SIZE) {
result->existence_map =
- (char *) zalloc(vm_object_small_existence_map_zone);
+ (char *) kmem_cache_alloc(&vm_object_small_existence_map_cache);
result->existence_size = SMALL_SIZE;
} else if (bytes <= LARGE_SIZE) {
result->existence_map =
- (char *) zalloc(vm_object_large_existence_map_zone);
+ (char *) kmem_cache_alloc(&vm_object_large_existence_map_cache);
result->existence_size = LARGE_SIZE;
}
return(result);
@@ -89,14 +86,14 @@ void vm_external_destroy(e)
if (e->existence_map != (char *) 0) {
if (e->existence_size <= SMALL_SIZE) {
- zfree(vm_object_small_existence_map_zone,
+ kmem_cache_free(&vm_object_small_existence_map_cache,
(vm_offset_t) e->existence_map);
} else {
- zfree(vm_object_large_existence_map_zone,
+ kmem_cache_free(&vm_object_large_existence_map_cache,
(vm_offset_t) e->existence_map);
}
}
- zfree(vm_external_zone, (vm_offset_t) e);
+ kmem_cache_free(&vm_external_cache, (vm_offset_t) e);
}
vm_external_state_t _vm_external_state_get(e, offset)
@@ -142,18 +139,14 @@ void vm_external_module_initialize(void)
{
vm_size_t size = (vm_size_t) sizeof(struct vm_external);
- vm_external_zone = zinit(size, 0, 16*1024*size, size,
- 0, "external page bitmaps");
+ kmem_cache_init(&vm_external_cache, "vm_external", size, 0,
+ NULL, NULL, NULL, 0);
- vm_object_small_existence_map_zone = zinit(SMALL_SIZE, 0,
- round_page(LARGE_SIZE * SMALL_SIZE),
- round_page(SMALL_SIZE),
- ZONE_EXHAUSTIBLE,
- "object small existence maps");
+ kmem_cache_init(&vm_object_small_existence_map_cache,
+ "small_existence_map", SMALL_SIZE, 0,
+ NULL, NULL, NULL, 0);
- vm_object_large_existence_map_zone = zinit(LARGE_SIZE, 0,
- round_page(8 * LARGE_SIZE),
- round_page(LARGE_SIZE),
- ZONE_EXHAUSTIBLE,
- "object large existence maps");
+ kmem_cache_init(&vm_object_large_existence_map_cache,
+ "large_existence_map", LARGE_SIZE, 0,
+ NULL, NULL, NULL, 0);
}
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index cce043a1..7e849616 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -51,9 +51,8 @@
#include <mach/memory_object.h>
#include <vm/memory_object_user.user.h>
/* For memory_object_data_{request,unlock} */
-#include <kern/mach_param.h>
#include <kern/macro_help.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#if MACH_PCSAMPLE
#include <kern/pc_sample.h>
@@ -85,7 +84,7 @@ typedef struct vm_fault_state {
vm_prot_t vmfp_access;
} vm_fault_state_t;
-zone_t vm_fault_state_zone = 0;
+struct kmem_cache vm_fault_state_cache;
int vm_object_absent_max = 50;
@@ -107,10 +106,8 @@ extern struct db_watchpoint *db_watchpoint_list;
*/
void vm_fault_init(void)
{
- vm_fault_state_zone = zinit(sizeof(vm_fault_state_t), 0,
- THREAD_MAX * sizeof(vm_fault_state_t),
- sizeof(vm_fault_state_t),
- 0, "vm fault state");
+ kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
+ sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0);
}
/*
@@ -257,6 +254,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_stat_sample(SAMPLED_PC_VM_FAULTS_ANY);
vm_stat.faults++; /* needs lock XXX */
+ current_task()->faults++;
/*
* Recovery actions
@@ -474,6 +472,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
vm_stat.zero_fill_count++;
+ current_task()->zero_fills++;
vm_object_lock(object);
pmap_clear_modify(m->phys_addr);
break;
@@ -555,6 +554,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
if (m->inactive) {
vm_stat_sample(SAMPLED_PC_VM_REACTIVATION_FAULTS);
vm_stat.reactivations++;
+ current_task()->reactivations++;
}
VM_PAGE_QUEUES_REMOVE(m);
@@ -654,13 +654,14 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_stat.pageins++;
vm_stat_sample(SAMPLED_PC_VM_PAGEIN_FAULTS);
+ current_task()->pageins++;
if ((rc = memory_object_data_request(object->pager,
object->pager_request,
m->offset + object->paging_offset,
PAGE_SIZE, access_required)) != KERN_SUCCESS) {
if (rc != MACH_SEND_INTERRUPTED)
- printf("%s(0x%p, 0x%p, 0x%x, 0x%x, 0x%x) failed, %x\n",
+ printf("%s(0x%p, 0x%p, 0x%lx, 0x%x, 0x%x) failed, %x\n",
"memory_object_data_request",
object->pager,
object->pager_request,
@@ -743,6 +744,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_page_zero_fill(m);
vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
vm_stat.zero_fill_count++;
+ current_task()->zero_fills++;
vm_object_lock(object);
pmap_clear_modify(m->phys_addr);
break;
@@ -858,6 +860,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_stat.cow_faults++;
vm_stat_sample(SAMPLED_PC_VM_COW_FAULTS);
+ current_task()->cow_faults++;
object = first_object;
offset = first_offset;
@@ -1206,12 +1209,12 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
/*
* if this assignment stmt is written as
- * 'active_threads[cpu_number()] = zalloc()',
- * cpu_number may be evaluated before zalloc;
- * if zalloc blocks, cpu_number will be wrong
+ * 'active_threads[cpu_number()] = kmem_cache_alloc()',
+ * cpu_number may be evaluated before kmem_cache_alloc;
+ * if kmem_cache_alloc blocks, cpu_number will be wrong
*/
- state = (char *) zalloc(vm_fault_state_zone);
+ state = (char *) kmem_cache_alloc(&vm_fault_state_cache);
current_thread()->ith_other = state;
}
@@ -1490,7 +1493,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
register vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
- zfree(vm_fault_state_zone, (vm_offset_t) state);
+ kmem_cache_free(&vm_fault_state_cache, (vm_offset_t) state);
(*continuation)(kr);
/*NOTREACHED*/
}
@@ -1641,6 +1644,7 @@ kern_return_t vm_fault_wire_fast(map, va, entry)
vm_prot_t prot;
vm_stat.faults++; /* needs lock XXX */
+ current_task()->faults++;
/*
* Recovery actions
*/
diff --git a/vm/vm_init.c b/vm/vm_init.c
index f6a40602..89eb0984 100644
--- a/vm/vm_init.c
+++ b/vm/vm_init.c
@@ -35,7 +35,7 @@
*/
#include <mach/machine/vm_types.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <kern/kalloc.h>
#include <vm/vm_fault.h>
#include <vm/vm_object.h>
@@ -43,7 +43,7 @@
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/memory_object.h>
-
+#include <vm/memory_object_proxy.h>
/*
@@ -67,12 +67,12 @@ void vm_mem_bootstrap()
* Initialize other VM packages
*/
- zone_bootstrap();
+ slab_bootstrap();
vm_object_bootstrap();
vm_map_init();
kmem_init(start, end);
pmap_init();
- zone_init();
+ slab_init();
kalloc_init();
vm_fault_init();
vm_page_module_init();
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
index cfa33ff8..fd46e982 100644
--- a/vm/vm_kern.c
+++ b/vm/vm_kern.c
@@ -58,7 +58,8 @@
* Variables exported by this module.
*/
-vm_map_t kernel_map;
+static struct vm_map kernel_map_store;
+vm_map_t kernel_map = &kernel_map_store;
vm_map_t kernel_pageable_map;
extern void kmem_alloc_pages();
@@ -811,27 +812,27 @@ kmem_remap_pages(object, offset, start, end, protection)
}
/*
- * kmem_suballoc:
+ * kmem_submap:
*
- * Allocates a map to manage a subrange
+ * Initializes a map to manage a subrange
* of the kernel virtual address space.
*
* Arguments are as follows:
*
+ * map Map to initialize
* parent Map to take range from
* size Size of range to find
* min, max Returned endpoints of map
* pageable Can the region be paged
*/
-vm_map_t
-kmem_suballoc(parent, min, max, size, pageable)
- vm_map_t parent;
+void
+kmem_submap(map, parent, min, max, size, pageable)
+ vm_map_t map, parent;
vm_offset_t *min, *max;
vm_size_t size;
boolean_t pageable;
{
- vm_map_t map;
vm_offset_t addr;
kern_return_t kr;
@@ -850,20 +851,16 @@ kmem_suballoc(parent, min, max, size, pageable)
vm_submap_object, (vm_offset_t) 0, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS)
- panic("kmem_suballoc");
+ panic("kmem_submap");
pmap_reference(vm_map_pmap(parent));
- map = vm_map_create(vm_map_pmap(parent), addr, addr + size, pageable);
- if (map == VM_MAP_NULL)
- panic("kmem_suballoc");
-
+ vm_map_setup(map, vm_map_pmap(parent), addr, addr + size, pageable);
kr = vm_map_submap(parent, addr, addr + size, map);
if (kr != KERN_SUCCESS)
- panic("kmem_suballoc");
+ panic("kmem_submap");
*min = addr;
*max = addr + size;
- return map;
}
/*
@@ -876,9 +873,8 @@ void kmem_init(start, end)
vm_offset_t start;
vm_offset_t end;
{
- kernel_map = vm_map_create(pmap_kernel(),
- VM_MIN_KERNEL_ADDRESS, end,
- FALSE);
+ vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end,
+ FALSE);
/*
* Reserve virtual memory allocated up to this time.
diff --git a/vm/vm_kern.h b/vm/vm_kern.h
index ca93d7a4..22b7c123 100644
--- a/vm/vm_kern.h
+++ b/vm/vm_kern.h
@@ -58,8 +58,8 @@ extern kern_return_t kmem_realloc(vm_map_t, vm_offset_t, vm_size_t,
vm_offset_t *, vm_size_t);
extern void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
-extern vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *,
- vm_size_t, boolean_t);
+extern void kmem_submap(vm_map_t, vm_map_t, vm_offset_t *,
+ vm_offset_t *, vm_size_t, boolean_t);
extern kern_return_t kmem_io_map_copyout(vm_map_t, vm_offset_t *,
vm_offset_t *, vm_size_t *,
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 751e0314..47db118f 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -41,7 +41,9 @@
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/debug.h>
-#include <kern/zalloc.h>
+#include <kern/kalloc.h>
+#include <kern/rbtree.h>
+#include <kern/slab.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
@@ -51,6 +53,11 @@
#include <vm/vm_kern.h>
#include <ipc/ipc_port.h>
+#if MACH_KDB
+#include <ddb/db_output.h>
+#endif /* MACH_KDB */
+
+
/* Forward declarations */
kern_return_t vm_map_delete(
vm_map_t map,
@@ -70,7 +77,7 @@ void vm_map_copy_page_discard (vm_map_copy_t copy);
* map entry to the same memory - the wired count in the new entry
* must be set to zero. vm_map_entry_copy_full() creates a new
* entry that is identical to the old entry. This preserves the
- * wire count; it's used for map splitting and zone changing in
+ * wire count; it's used for map splitting and cache changing in
* vm_map_copyout.
*/
#define vm_map_entry_copy(NEW,OLD) \
@@ -94,7 +101,7 @@ MACRO_END
* Synchronization is required prior to most operations.
*
* Maps consist of an ordered doubly-linked list of simple
- * entries; a single hint is used to speed up lookups.
+ * entries; a hint and a red-black tree are used to speed up lookups.
*
* Sharing maps have been deleted from this version of Mach.
* All shared objects are now mapped directly into the respective
@@ -130,10 +137,10 @@ MACRO_END
* vm_object_copy_strategically() in vm_object.c.
*/
-zone_t vm_map_zone; /* zone for vm_map structures */
-zone_t vm_map_entry_zone; /* zone for vm_map_entry structures */
-zone_t vm_map_kentry_zone; /* zone for kernel entry structures */
-zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */
+struct kmem_cache vm_map_cache; /* cache for vm_map structures */
+struct kmem_cache vm_map_entry_cache; /* cache for vm_map_entry structures */
+struct kmem_cache vm_map_kentry_cache; /* cache for kernel entry structures */
+struct kmem_cache vm_map_copy_cache; /* cache for vm_map_copy structures */
boolean_t vm_map_lookup_entry(); /* forward declaration */
@@ -143,7 +150,8 @@ boolean_t vm_map_lookup_entry(); /* forward declaration */
* vm_map_submap creates the submap.
*/
-vm_object_t vm_submap_object;
+static struct vm_object vm_submap_object_store;
+vm_object_t vm_submap_object = &vm_submap_object_store;
/*
* vm_map_init:
@@ -151,51 +159,82 @@ vm_object_t vm_submap_object;
* Initialize the vm_map module. Must be called before
* any other vm_map routines.
*
- * Map and entry structures are allocated from zones -- we must
- * initialize those zones.
+ * Map and entry structures are allocated from caches -- we must
+ * initialize those caches.
*
- * There are three zones of interest:
+ * There are three caches of interest:
*
- * vm_map_zone: used to allocate maps.
- * vm_map_entry_zone: used to allocate map entries.
- * vm_map_kentry_zone: used to allocate map entries for the kernel.
+ * vm_map_cache: used to allocate maps.
+ * vm_map_entry_cache: used to allocate map entries.
+ * vm_map_kentry_cache: used to allocate map entries for the kernel.
*
- * The kernel allocates map entries from a special zone that is initially
- * "crammed" with memory. It would be difficult (perhaps impossible) for
- * the kernel to allocate more memory to a entry zone when it became
- * empty since the very act of allocating memory implies the creation
- * of a new entry.
+ * Kernel map entries are allocated from a special cache, using a custom
+ * page allocation function to avoid recursion. It would be difficult
+ * (perhaps impossible) for the kernel to allocate more memory to an entry
+ * cache when it became empty since the very act of allocating memory
+ * implies the creation of a new entry.
*/
vm_offset_t kentry_data;
-vm_size_t kentry_data_size;
-int kentry_count = 256; /* to init kentry_data_size */
+vm_size_t kentry_data_size = KENTRY_DATA_SIZE;
-void vm_map_init(void)
+static vm_offset_t kentry_pagealloc(vm_size_t size)
{
- vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 0, 40*1024,
- PAGE_SIZE, 0, "maps");
- vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
- 0, 1024*1024, PAGE_SIZE*5,
- 0, "non-kernel map entries");
- vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry), 0,
- kentry_data_size, kentry_data_size,
- ZONE_FIXED /* XXX */, "kernel map entries");
-
- vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
- 0, 16*1024, PAGE_SIZE, 0,
- "map copies");
+ vm_offset_t result;
- /*
- * Cram the kentry zone with initial data.
- */
- zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
+ if (size > kentry_data_size)
+ panic("vm_map: kentry memory exhausted");
+
+ result = kentry_data;
+ kentry_data += size;
+ kentry_data_size -= size;
+ return result;
+}
+
+void vm_map_init(void)
+{
+ kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
+ NULL, NULL, NULL, 0);
+ kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
+ sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
+ kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
+ sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
+ NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
+ | KMEM_CACHE_NORECLAIM);
+ kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
+ sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);
/*
* Submap object is initialized by vm_object_init.
*/
}
+void vm_map_setup(map, pmap, min, max, pageable)
+ vm_map_t map;
+ pmap_t pmap;
+ vm_offset_t min, max;
+ boolean_t pageable;
+{
+ vm_map_first_entry(map) = vm_map_to_entry(map);
+ vm_map_last_entry(map) = vm_map_to_entry(map);
+ map->hdr.nentries = 0;
+ map->hdr.entries_pageable = pageable;
+ rbtree_init(&map->hdr.tree);
+
+ map->size = 0;
+ map->ref_count = 1;
+ map->pmap = pmap;
+ map->min_offset = min;
+ map->max_offset = max;
+ map->wiring_required = FALSE;
+ map->wait_for_space = FALSE;
+ map->first_free = vm_map_to_entry(map);
+ map->hint = vm_map_to_entry(map);
+ vm_map_lock_init(map);
+ simple_lock_init(&map->ref_lock);
+ simple_lock_init(&map->hint_lock);
+}
+
/*
* vm_map_create:
*
@@ -210,27 +249,11 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
{
register vm_map_t result;
- result = (vm_map_t) zalloc(vm_map_zone);
+ result = (vm_map_t) kmem_cache_alloc(&vm_map_cache);
if (result == VM_MAP_NULL)
panic("vm_map_create");
- vm_map_first_entry(result) = vm_map_to_entry(result);
- vm_map_last_entry(result) = vm_map_to_entry(result);
- result->hdr.nentries = 0;
- result->hdr.entries_pageable = pageable;
-
- result->size = 0;
- result->ref_count = 1;
- result->pmap = pmap;
- result->min_offset = min;
- result->max_offset = max;
- result->wiring_required = FALSE;
- result->wait_for_space = FALSE;
- result->first_free = vm_map_to_entry(result);
- result->hint = vm_map_to_entry(result);
- vm_map_lock_init(result);
- simple_lock_init(&result->ref_lock);
- simple_lock_init(&result->hint_lock);
+ vm_map_setup(result, pmap, min, max, pageable);
return(result);
}
@@ -250,15 +273,15 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
vm_map_entry_t _vm_map_entry_create(map_header)
register struct vm_map_header *map_header;
{
- register zone_t zone;
+ register kmem_cache_t cache;
register vm_map_entry_t entry;
if (map_header->entries_pageable)
- zone = vm_map_entry_zone;
+ cache = &vm_map_entry_cache;
else
- zone = vm_map_kentry_zone;
+ cache = &vm_map_kentry_cache;
- entry = (vm_map_entry_t) zalloc(zone);
+ entry = (vm_map_entry_t) kmem_cache_alloc(cache);
if (entry == VM_MAP_ENTRY_NULL)
panic("vm_map_entry_create");
@@ -280,20 +303,50 @@ void _vm_map_entry_dispose(map_header, entry)
register struct vm_map_header *map_header;
register vm_map_entry_t entry;
{
- register zone_t zone;
+ register kmem_cache_t cache;
if (map_header->entries_pageable)
- zone = vm_map_entry_zone;
+ cache = &vm_map_entry_cache;
else
- zone = vm_map_kentry_zone;
+ cache = &vm_map_kentry_cache;
- zfree(zone, (vm_offset_t) entry);
+ kmem_cache_free(cache, (vm_offset_t) entry);
+}
+
+/*
+ * Red-black tree lookup/insert comparison functions
+ */
+static inline int vm_map_entry_cmp_lookup(vm_offset_t addr,
+ const struct rbtree_node *node)
+{
+ struct vm_map_entry *entry;
+
+ entry = rbtree_entry(node, struct vm_map_entry, tree_node);
+
+ if (addr < entry->vme_start)
+ return -1;
+ else if (addr < entry->vme_end)
+ return 0;
+ else
+ return 1;
+}
+
+static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a,
+ const struct rbtree_node *b)
+{
+ struct vm_map_entry *entry;
+
+ entry = rbtree_entry(a, struct vm_map_entry, tree_node);
+ return vm_map_entry_cmp_lookup(entry->vme_start, b);
}
/*
* vm_map_entry_{un,}link:
*
* Insert/remove entries from maps (or map copies).
+ *
+ * The start and end addresses of the entries must be properly set
+ * before using these macros.
*/
#define vm_map_entry_link(map, after_where, entry) \
_vm_map_entry_link(&(map)->hdr, after_where, entry)
@@ -308,6 +361,8 @@ void _vm_map_entry_dispose(map_header, entry)
(entry)->vme_next = (after_where)->vme_next; \
(entry)->vme_prev->vme_next = \
(entry)->vme_next->vme_prev = (entry); \
+ rbtree_insert(&(hdr)->tree, &(entry)->tree_node, \
+ vm_map_entry_cmp_insert); \
MACRO_END
#define vm_map_entry_unlink(map, entry) \
@@ -321,6 +376,7 @@ void _vm_map_entry_dispose(map_header, entry)
(hdr)->nentries--; \
(entry)->vme_next->vme_prev = (entry)->vme_prev; \
(entry)->vme_prev->vme_next = (entry)->vme_next; \
+ rbtree_remove(&(hdr)->tree, &(entry)->tree_node); \
MACRO_END
/*
@@ -368,7 +424,7 @@ void vm_map_deallocate(map)
pmap_destroy(map->pmap);
- zfree(vm_map_zone, (vm_offset_t) map);
+ kmem_cache_free(&vm_map_cache, (vm_offset_t) map);
}
/*
@@ -397,70 +453,49 @@ boolean_t vm_map_lookup_entry(map, address, entry)
register vm_offset_t address;
vm_map_entry_t *entry; /* OUT */
{
- register vm_map_entry_t cur;
- register vm_map_entry_t last;
+ register struct rbtree_node *node;
+ register vm_map_entry_t hint;
/*
- * Start looking either from the head of the
- * list, or from the hint.
+ * First, make a quick check to see if we are already
+ * looking at the entry we want (which is often the case).
*/
simple_lock(&map->hint_lock);
- cur = map->hint;
+ hint = map->hint;
simple_unlock(&map->hint_lock);
- if (cur == vm_map_to_entry(map))
- cur = cur->vme_next;
-
- if (address >= cur->vme_start) {
- /*
- * Go from hint to end of list.
- *
- * But first, make a quick check to see if
- * we are already looking at the entry we
- * want (which is usually the case).
- * Note also that we don't need to save the hint
- * here... it is the same hint (unless we are
- * at the header, in which case the hint didn't
- * buy us anything anyway).
- */
- last = vm_map_to_entry(map);
- if ((cur != last) && (cur->vme_end > address)) {
- *entry = cur;
+ if ((hint != vm_map_to_entry(map)) && (address >= hint->vme_start)) {
+ if (address < hint->vme_end) {
+ *entry = hint;
return(TRUE);
+ } else {
+ vm_map_entry_t next = hint->vme_next;
+
+ if ((next == vm_map_to_entry(map))
+ || (address < next->vme_start)) {
+ *entry = hint;
+ return(FALSE);
+ }
}
}
- else {
- /*
- * Go from start to hint, *inclusively*
- */
- last = cur->vme_next;
- cur = vm_map_first_entry(map);
- }
/*
- * Search linearly
+ * If the hint didn't help, use the red-black tree.
*/
- while (cur != last) {
- if (cur->vme_end > address) {
- if (address >= cur->vme_start) {
- /*
- * Save this lookup for future
- * hints, and return
- */
+ node = rbtree_lookup_nearest(&map->hdr.tree, address,
+ vm_map_entry_cmp_lookup, RBTREE_LEFT);
- *entry = cur;
- SAVE_HINT(map, cur);
- return(TRUE);
- }
- break;
- }
- cur = cur->vme_next;
+ if (node == NULL) {
+ *entry = vm_map_to_entry(map);
+ SAVE_HINT(map, *entry);
+ return(FALSE);
+ } else {
+ *entry = rbtree_entry(node, struct vm_map_entry, tree_node);
+ SAVE_HINT(map, *entry);
+ return((address < (*entry)->vme_end) ? TRUE : FALSE);
}
- *entry = cur->vme_prev;
- SAVE_HINT(map, *entry);
- return(FALSE);
}
/*
@@ -695,7 +730,7 @@ vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
if (vm_map_pmap_enter_print) {
printf("vm_map_pmap_enter:");
- printf("map: %p, addr: %x, object: %p, offset: %x\n",
+ printf("map: %p, addr: %lx, object: %p, offset: %lx\n",
map, addr, object, offset);
}
@@ -754,6 +789,9 @@ kern_return_t vm_map_enter(
#define RETURN(value) { result = value; goto BailOut; }
+ if (size == 0)
+ return KERN_INVALID_ARGUMENT;
+
StartAgain: ;
start = *address;
@@ -1907,7 +1945,7 @@ free_next_copy:
register vm_map_copy_t new_copy;
new_copy = (vm_map_copy_t) copy->cpy_cont_args;
- zfree(vm_map_copy_zone, (vm_offset_t) copy);
+ kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
copy = new_copy;
goto free_next_copy;
}
@@ -1918,7 +1956,7 @@ free_next_copy:
break;
}
- zfree(vm_map_copy_zone, (vm_offset_t) copy);
+ kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
}
/*
@@ -1952,7 +1990,7 @@ vm_map_copy_copy(copy)
* from the old one into it.
*/
- new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ new_copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
*new_copy = *copy;
if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
@@ -2160,7 +2198,7 @@ start_pass_1:
/*
* XXXO If there are no permanent objects in the destination,
- * XXXO and the source and destination map entry zones match,
+ * XXXO and the source and destination map entry caches match,
* XXXO and the destination map entry is not shared,
* XXXO then the map entries can be deleted and replaced
* XXXO with those from the copy. The following code is the
@@ -2398,12 +2436,16 @@ start_pass_1:
*/
#define vm_map_copy_insert(map, where, copy) \
MACRO_BEGIN \
+ struct rbtree_node *node, *tmp; \
+ rbtree_for_each_remove(&(copy)->cpy_hdr.tree, node, tmp) \
+ rbtree_insert(&(map)->hdr.tree, node, \
+ vm_map_entry_cmp_insert); \
(((where)->vme_next)->vme_prev = vm_map_copy_last_entry(copy)) \
->vme_next = ((where)->vme_next); \
((where)->vme_next = vm_map_copy_first_entry(copy)) \
->vme_prev = (where); \
(map)->hdr.nentries += (copy)->cpy_hdr.nentries; \
- zfree(vm_map_copy_zone, (vm_offset_t) copy); \
+ kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); \
MACRO_END
/*
@@ -2459,7 +2501,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS)
return(kr);
- zfree(vm_map_copy_zone, (vm_offset_t) copy);
+ kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
return(KERN_SUCCESS);
}
@@ -2516,15 +2558,15 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
* Mismatches occur when dealing with the default
* pager.
*/
- zone_t old_zone;
+ kmem_cache_t old_cache;
vm_map_entry_t next, new;
/*
- * Find the zone that the copies were allocated from
+ * Find the cache that the copies were allocated from
*/
- old_zone = (copy->cpy_hdr.entries_pageable)
- ? vm_map_entry_zone
- : vm_map_kentry_zone;
+ old_cache = (copy->cpy_hdr.entries_pageable)
+ ? &vm_map_entry_cache
+ : &vm_map_kentry_cache;
entry = vm_map_copy_first_entry(copy);
/*
@@ -2547,7 +2589,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
vm_map_copy_last_entry(copy),
new);
next = entry->vme_next;
- zfree(old_zone, (vm_offset_t) entry);
+ kmem_cache_free(old_cache, (vm_offset_t) entry);
entry = next;
}
}
@@ -3036,10 +3078,10 @@ error:
* Consume on success logic.
*/
if (copy != orig_copy) {
- zfree(vm_map_copy_zone, (vm_offset_t) copy);
+ kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
}
if (result == KERN_SUCCESS) {
- zfree(vm_map_copy_zone, (vm_offset_t) orig_copy);
+ kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) orig_copy);
}
return(result);
@@ -3116,12 +3158,13 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
* remember the endpoints prior to rounding.
*/
- copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
vm_map_copy_first_entry(copy) =
vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
copy->type = VM_MAP_COPY_ENTRY_LIST;
copy->cpy_hdr.nentries = 0;
copy->cpy_hdr.entries_pageable = TRUE;
+ rbtree_init(&copy->cpy_hdr.tree);
copy->offset = src_addr;
copy->size = len;
@@ -3443,7 +3486,7 @@ kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
* and null links.
*/
- copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
vm_map_copy_first_entry(copy) =
vm_map_copy_last_entry(copy) = VM_MAP_ENTRY_NULL;
copy->type = VM_MAP_COPY_OBJECT;
@@ -3598,7 +3641,7 @@ kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
* be page-aligned.
*/
- copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
copy->type = VM_MAP_COPY_PAGE_LIST;
copy->cpy_npages = 0;
copy->offset = src_addr;
@@ -4157,6 +4200,8 @@ vm_map_t vm_map_fork(old_map)
object->ref_count++;
vm_object_unlock(object);
+ new_entry = vm_map_entry_create(new_map);
+
if (old_entry->projected_on != 0) {
/*
* If entry is projected buffer, clone the
@@ -4171,7 +4216,6 @@ vm_map_t vm_map_fork(old_map)
* Mark both entries as shared.
*/
- new_entry = vm_map_entry_create(new_map);
vm_map_entry_copy(new_entry, old_entry);
old_entry->is_shared = TRUE;
new_entry->is_shared = TRUE;
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 567fe933..a15e681b 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -51,8 +51,12 @@
#include <vm/vm_page.h>
#include <vm/vm_types.h>
#include <kern/lock.h>
+#include <kern/rbtree.h>
#include <kern/macro_help.h>
+/* TODO: make it dynamic */
+#define KENTRY_DATA_SIZE (64*PAGE_SIZE)
+
/*
* Types defined:
*
@@ -100,6 +104,7 @@ struct vm_map_entry {
#define vme_next links.next
#define vme_start links.start
#define vme_end links.end
+ struct rbtree_node tree_node; /* links to other entries in tree */
union vm_map_object object; /* object I point to */
vm_offset_t offset; /* offset into object */
unsigned int
@@ -135,6 +140,7 @@ typedef struct vm_map_entry *vm_map_entry_t;
*/
struct vm_map_header {
struct vm_map_links links; /* first, last, min, max */
+ struct rbtree tree; /* Sorted tree of entries */
int nentries; /* Number of entries */
boolean_t entries_pageable;
/* are map entries pageable? */
@@ -150,9 +156,11 @@ struct vm_map_header {
*
* Implementation:
* Maps are doubly-linked lists of map entries, sorted
- * by address. One hint is used to start
- * searches again from the last successful search,
- * insertion, or removal. Another hint is used to
+ * by address. They're also contained in a red-black tree.
+ * One hint is used to start searches again at the last
+ * successful search, insertion, or removal. If the hint
+ * lookup failed (i.e. the hint didn't refer to the requested
+ * entry), a BST lookup is performed. Another hint is used to
* quickly find free space.
*/
struct vm_map {
@@ -352,11 +360,14 @@ MACRO_END
*/
extern vm_offset_t kentry_data;
-extern vm_offset_t kentry_data_size;
+extern vm_size_t kentry_data_size;
extern int kentry_count;
/* Initialize the module */
extern void vm_map_init(void);
+/* Initialize an empty map */
+extern void vm_map_setup(vm_map_t, pmap_t, vm_offset_t, vm_offset_t,
+ boolean_t);
/* Create an empty map */
extern vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t,
boolean_t);
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 9057973d..d83c39fe 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -47,7 +47,7 @@
#include <kern/lock.h>
#include <kern/queue.h>
#include <kern/xpr.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <vm/memory_object.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
@@ -55,6 +55,10 @@
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
+#if MACH_KDB
+#include <ddb/db_output.h>
+#endif /* MACH_KDB */
+
void memory_object_release(
ipc_port_t pager,
@@ -141,13 +145,14 @@ void vm_object_deactivate_pages(vm_object_t);
* ZZZ Continue this comment.
*/
-zone_t vm_object_zone; /* vm backing store zone */
+struct kmem_cache vm_object_cache; /* vm backing store cache */
/*
* All wired-down kernel memory belongs to a single virtual
* memory object (kernel_object) to avoid wasting data structures.
*/
-vm_object_t kernel_object;
+static struct vm_object kernel_object_store;
+vm_object_t kernel_object = &kernel_object_store;
/*
* Virtual memory objects that are not referenced by
@@ -191,6 +196,15 @@ decl_simple_lock_data(,vm_object_cached_lock_data)
simple_unlock(&vm_object_cached_lock_data)
/*
+ * Number of physical pages referenced by cached objects.
+ * This counter is protected by its own lock to work around
+ * lock ordering issues.
+ */
+int vm_object_cached_pages;
+
+decl_simple_lock_data(,vm_object_cached_pages_lock_data)
+
+/*
* Virtual memory objects are initialized from
* a template (see vm_object_allocate).
*
@@ -198,7 +212,7 @@ decl_simple_lock_data(,vm_object_cached_lock_data)
* object structure, be sure to add initialization
* (see vm_object_init).
*/
-vm_object_t vm_object_template;
+struct vm_object vm_object_template;
/*
* vm_object_allocate:
@@ -206,17 +220,24 @@ vm_object_t vm_object_template;
* Returns a new object with the given size.
*/
+static void _vm_object_setup(
+ vm_object_t object,
+ vm_size_t size)
+{
+ *object = vm_object_template;
+ queue_init(&object->memq);
+ vm_object_lock_init(object);
+ object->size = size;
+}
+
vm_object_t _vm_object_allocate(
vm_size_t size)
{
register vm_object_t object;
- object = (vm_object_t) zalloc(vm_object_zone);
+ object = (vm_object_t) kmem_cache_alloc(&vm_object_cache);
- *object = *vm_object_template;
- queue_init(&object->memq);
- vm_object_lock_init(object);
- object->size = size;
+ _vm_object_setup(object, size);
return object;
}
@@ -244,10 +265,8 @@ vm_object_t vm_object_allocate(
*/
void vm_object_bootstrap(void)
{
- vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object), 0,
- round_page(512*1024),
- round_page(12*1024),
- 0, "objects");
+ kmem_cache_init(&vm_object_cache, "vm_object",
+ sizeof(struct vm_object), 0, NULL, NULL, NULL, 0);
queue_init(&vm_object_cached_list);
simple_lock_init(&vm_object_cached_lock_data);
@@ -256,53 +275,50 @@ void vm_object_bootstrap(void)
* Fill in a template object, for quick initialization
*/
- vm_object_template = (vm_object_t) zalloc(vm_object_zone);
- memset(vm_object_template, 0, sizeof *vm_object_template);
-
- vm_object_template->ref_count = 1;
- vm_object_template->size = 0;
- vm_object_template->resident_page_count = 0;
- vm_object_template->copy = VM_OBJECT_NULL;
- vm_object_template->shadow = VM_OBJECT_NULL;
- vm_object_template->shadow_offset = (vm_offset_t) 0;
+ vm_object_template.ref_count = 1;
+ vm_object_template.size = 0;
+ vm_object_template.resident_page_count = 0;
+ vm_object_template.copy = VM_OBJECT_NULL;
+ vm_object_template.shadow = VM_OBJECT_NULL;
+ vm_object_template.shadow_offset = (vm_offset_t) 0;
- vm_object_template->pager = IP_NULL;
- vm_object_template->paging_offset = 0;
- vm_object_template->pager_request = PAGER_REQUEST_NULL;
- vm_object_template->pager_name = IP_NULL;
+ vm_object_template.pager = IP_NULL;
+ vm_object_template.paging_offset = 0;
+ vm_object_template.pager_request = PAGER_REQUEST_NULL;
+ vm_object_template.pager_name = IP_NULL;
- vm_object_template->pager_created = FALSE;
- vm_object_template->pager_initialized = FALSE;
- vm_object_template->pager_ready = FALSE;
+ vm_object_template.pager_created = FALSE;
+ vm_object_template.pager_initialized = FALSE;
+ vm_object_template.pager_ready = FALSE;
- vm_object_template->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+ vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_NONE;
/* ignored if temporary, will be reset before
* permanent object becomes ready */
- vm_object_template->use_shared_copy = FALSE;
- vm_object_template->shadowed = FALSE;
-
- vm_object_template->absent_count = 0;
- vm_object_template->all_wanted = 0; /* all bits FALSE */
-
- vm_object_template->paging_in_progress = 0;
- vm_object_template->can_persist = FALSE;
- vm_object_template->internal = TRUE;
- vm_object_template->temporary = TRUE;
- vm_object_template->alive = TRUE;
- vm_object_template->lock_in_progress = FALSE;
- vm_object_template->lock_restart = FALSE;
- vm_object_template->use_old_pageout = TRUE; /* XXX change later */
- vm_object_template->last_alloc = (vm_offset_t) 0;
+ vm_object_template.use_shared_copy = FALSE;
+ vm_object_template.shadowed = FALSE;
+
+ vm_object_template.absent_count = 0;
+ vm_object_template.all_wanted = 0; /* all bits FALSE */
+
+ vm_object_template.paging_in_progress = 0;
+ vm_object_template.can_persist = FALSE;
+ vm_object_template.internal = TRUE;
+ vm_object_template.temporary = TRUE;
+ vm_object_template.alive = TRUE;
+ vm_object_template.lock_in_progress = FALSE;
+ vm_object_template.lock_restart = FALSE;
+ vm_object_template.use_old_pageout = TRUE; /* XXX change later */
+ vm_object_template.last_alloc = (vm_offset_t) 0;
#if MACH_PAGEMAP
- vm_object_template->existence_info = VM_EXTERNAL_NULL;
+ vm_object_template.existence_info = VM_EXTERNAL_NULL;
#endif /* MACH_PAGEMAP */
/*
* Initialize the "kernel object"
*/
- kernel_object = _vm_object_allocate(
+ _vm_object_setup(kernel_object,
VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);
/*
@@ -310,7 +326,7 @@ void vm_object_bootstrap(void)
* kernel object so that no limit is imposed on submap sizes.
*/
- vm_submap_object = _vm_object_allocate(
+ _vm_object_setup(vm_submap_object,
VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);
#if MACH_PAGEMAP
@@ -407,6 +423,7 @@ void vm_object_deallocate(
queue_enter(&vm_object_cached_list, object,
vm_object_t, cached_list);
overflow = (++vm_object_cached_count > vm_object_cached_max);
+ vm_object_cached_pages_update(object->resident_page_count);
vm_object_cache_unlock();
vm_object_deactivate_pages(object);
@@ -660,7 +677,7 @@ void vm_object_terminate(
* Free the space for the object.
*/
- zfree(vm_object_zone, (vm_offset_t) object);
+ kmem_cache_free(&vm_object_cache, (vm_offset_t) object);
}
/*
@@ -916,7 +933,7 @@ boolean_t vm_object_pmap_protect_by_page = FALSE;
void vm_object_pmap_protect(
register vm_object_t object,
register vm_offset_t offset,
- vm_offset_t size,
+ vm_size_t size,
pmap_t pmap,
vm_offset_t pmap_start,
vm_prot_t prot)
@@ -1857,6 +1874,7 @@ vm_object_t vm_object_lookup(
queue_remove(&vm_object_cached_list, object,
vm_object_t, cached_list);
vm_object_cached_count--;
+ vm_object_cached_pages_update(-object->resident_page_count);
}
object->ref_count++;
@@ -1888,6 +1906,7 @@ vm_object_t vm_object_lookup_name(
queue_remove(&vm_object_cached_list, object,
vm_object_t, cached_list);
vm_object_cached_count--;
+ vm_object_cached_pages_update(-object->resident_page_count);
}
object->ref_count++;
@@ -1924,6 +1943,7 @@ void vm_object_destroy(
queue_remove(&vm_object_cached_list, object,
vm_object_t, cached_list);
vm_object_cached_count--;
+ vm_object_cached_pages_update(-object->resident_page_count);
}
object->ref_count++;
@@ -2077,6 +2097,7 @@ restart:
queue_remove(&vm_object_cached_list, object,
vm_object_t, cached_list);
vm_object_cached_count--;
+ vm_object_cached_pages_update(-object->resident_page_count);
}
object->ref_count++;
vm_object_unlock(object);
@@ -2618,7 +2639,7 @@ void vm_object_collapse(
vm_object_unlock(object);
if (old_name_port != IP_NULL)
ipc_port_dealloc_kernel(old_name_port);
- zfree(vm_object_zone, (vm_offset_t) backing_object);
+ kmem_cache_free(&vm_object_cache, (vm_offset_t) backing_object);
vm_object_lock(object);
object_collapses++;
diff --git a/vm/vm_object.h b/vm/vm_object.h
index c9925709..4e4c9498 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -71,8 +71,8 @@ struct vm_object {
* if internal)
*/
- short ref_count; /* Number of references */
- short resident_page_count;
+ int ref_count; /* Number of references */
+ int resident_page_count;
/* number of resident pages */
struct vm_object *copy; /* Object that should receive
@@ -370,4 +370,23 @@ MACRO_END
#define vm_object_lock_taken(object) simple_lock_taken(&(object)->Lock)
#endif /* VM_OBJECT_DEBUG */
+/*
+ * Page cache accounting.
+ *
+ * The number of cached objects and pages can be read
+ * without holding any lock.
+ */
+
+extern int vm_object_cached_count;
+
+extern int vm_object_cached_pages;
+decl_simple_lock_data(extern,vm_object_cached_pages_lock_data)
+
+#define vm_object_cached_pages_update(page_count) \
+ MACRO_BEGIN \
+ simple_lock(&vm_object_cached_pages_lock_data); \
+ vm_object_cached_pages += (page_count); \
+ simple_unlock(&vm_object_cached_pages_lock_data); \
+ MACRO_END
+
#endif /* _VM_VM_OBJECT_H_ */
diff --git a/vm/vm_page.h b/vm/vm_page.h
index f13b0af9..4536d1c5 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -41,7 +41,6 @@
#include <vm/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
-#include <kern/zalloc.h>
#include <kern/macro_help.h>
#include <kern/sched_prim.h> /* definitions of wait/wakeup */
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index 7a755bf4..661675f0 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -43,6 +43,7 @@
#include <mach/vm_statistics.h>
#include <kern/counters.h>
#include <kern/debug.h>
+#include <kern/slab.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <vm/pmap.h>
@@ -544,8 +545,8 @@ void vm_pageout_scan()
* into an internal object and then immediately double-page it,
* sending it to the default pager.
*
- * consider_zone_gc should be last, because the other operations
- * might return memory to zones. When we pause we use
+ * slab_collect should be last, because the other operations
+ * might return memory to caches. When we pause we use
* vm_pageout_scan_continue as our continuation, so we will
* reenter vm_pageout_scan periodically and attempt to reclaim
* internal memory even if we never reach vm_page_free_target.
@@ -555,7 +556,7 @@ void vm_pageout_scan()
net_kmsg_collect();
consider_task_collect();
consider_thread_collect();
- consider_zone_gc();
+ slab_collect();
for (burst_count = 0;;) {
register vm_page_t m;
@@ -763,6 +764,7 @@ void vm_pageout_scan()
vm_object_unlock(object);
vm_page_activate(m);
vm_stat.reactivations++;
+ current_task()->reactivations++;
vm_page_unlock_queues();
vm_pageout_inactive_used++;
continue;
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index a6334989..7cf4fb16 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -45,7 +45,7 @@
#include <mach/vm_statistics.h>
#include <machine/vm_param.h>
#include <kern/xpr.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
@@ -58,9 +58,10 @@
#include <vm/vm_user.h>
#endif
-/* in zalloc.c XXX */
-extern vm_offset_t zdata;
-extern vm_size_t zdata_size;
+#if MACH_KDB
+#include <ddb/db_output.h>
+#endif /* MACH_KDB */
+
/*
* Associated with eacn page of user-allocatable memory is a
@@ -126,7 +127,7 @@ unsigned int vm_page_free_count_minimum; /* debugging */
* These page structures are allocated the way
* most other kernel structures are.
*/
-zone_t vm_page_zone;
+struct kmem_cache vm_page_cache;
/*
* Fictitious pages don't have a physical address,
@@ -239,14 +240,11 @@ void vm_page_bootstrap(
vm_page_free_wanted = 0;
/*
- * Steal memory for the zone system.
+ * Steal memory for the kernel map entries.
*/
- kentry_data_size = kentry_count * sizeof(struct vm_map_entry);
kentry_data = pmap_steal_memory(kentry_data_size);
- zdata = pmap_steal_memory(zdata_size);
-
/*
* Allocate (and initialize) the virtual-to-physical
* table hash buckets.
@@ -393,6 +391,11 @@ void pmap_startup(
vm_page_init(&pages[i], paddr);
pages_initialized++;
}
+ i = 0;
+ while (pmap_next_page(&paddr))
+ i++;
+ if (i)
+ printf("%d memory page(s) left away\n", i);
/*
* Release pages in reverse order so that physical pages
@@ -425,10 +428,8 @@ void pmap_startup(
*/
void vm_page_module_init(void)
{
- vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page), 0,
- VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
- PAGE_SIZE,
- 0, "vm pages");
+ kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
+ NULL, NULL, NULL, 0);
}
/*
@@ -450,7 +451,7 @@ void vm_page_create(
for (paddr = round_page(start);
paddr < trunc_page(end);
paddr += PAGE_SIZE) {
- m = (vm_page_t) zalloc(vm_page_zone);
+ m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
if (m == VM_PAGE_NULL)
panic("vm_page_create");
@@ -521,6 +522,10 @@ void vm_page_insert(
*/
object->resident_page_count++;
+ assert(object->resident_page_count >= 0);
+
+ if (object->can_persist && (object->ref_count == 0))
+ vm_object_cached_pages_update(1);
/*
* Detect sequential access and inactivate previous page.
@@ -589,6 +594,10 @@ void vm_page_replace(
m->tabled = FALSE;
object->resident_page_count--;
+ if (object->can_persist
+ && (object->ref_count == 0))
+ vm_object_cached_pages_update(-1);
+
/*
* Return page to the free list.
* Note the page is not tabled now, so this
@@ -620,6 +629,10 @@ void vm_page_replace(
*/
object->resident_page_count++;
+ assert(object->resident_page_count >= 0);
+
+ if (object->can_persist && (object->ref_count == 0))
+ vm_object_cached_pages_update(1);
}
/*
@@ -675,6 +688,9 @@ void vm_page_remove(
mem->object->resident_page_count--;
mem->tabled = FALSE;
+
+ if (mem->object->can_persist && (mem->object->ref_count == 0))
+ vm_object_cached_pages_update(-1);
}
/*
@@ -805,7 +821,7 @@ void vm_page_more_fictitious(void)
int i;
for (i = 0; i < vm_page_fictitious_quantum; i++) {
- m = (vm_page_t) zalloc(vm_page_zone);
+ m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
if (m == VM_PAGE_NULL)
panic("vm_page_more_fictitious");
diff --git a/vm/vm_user.c b/vm/vm_user.c
index 672daab8..6fe398e0 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -38,12 +38,14 @@
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
+#include <mach/vm_cache_statistics.h>
#include <kern/host.h>
#include <kern/task.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
+#include <vm/memory_object_proxy.h>
#include <vm/vm_page.h>
@@ -188,6 +190,29 @@ kern_return_t vm_statistics(map, stat)
return(KERN_SUCCESS);
}
+kern_return_t vm_cache_statistics(
+ vm_map_t map,
+ vm_cache_statistics_data_t *stats)
+{
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ stats->cache_object_count = vm_object_cached_count;
+ stats->cache_count = vm_object_cached_pages;
+
+ /* XXX Not implemented yet */
+ stats->active_tmp_count = 0;
+ stats->inactive_tmp_count = 0;
+ stats->active_perm_count = 0;
+ stats->inactive_perm_count = 0;
+ stats->dirty_count = 0;
+ stats->laundry_count = 0;
+ stats->writeback_count = 0;
+ stats->slab_count = 0;
+ stats->slab_reclaim_count = 0;
+ return KERN_SUCCESS;
+}
+
/*
* Handle machine-specific attributes for a mapping, such
* as cachability, migrability, etc.
@@ -277,11 +302,6 @@ kern_return_t vm_copy(map, source_address, size, dest_address)
}
-/* XXX From memory_object_proxy.c */
-kern_return_t
-memory_object_proxy_lookup (ipc_port_t proxy_object, ipc_port_t *object,
- vm_prot_t *max_protection);
-
/*
* Routine: vm_map
*/
@@ -322,6 +342,9 @@ kern_return_t vm_map(
return(KERN_INVALID_ARGUMENT);
}
+ if (size == 0)
+ return KERN_INVALID_ARGUMENT;
+
*address = trunc_page(*address);
size = round_page(size);
diff --git a/vm/vm_user.h b/vm/vm_user.h
index 3f15e5ed..c6f20a82 100644
--- a/vm/vm_user.h
+++ b/vm/vm_user.h
@@ -37,6 +37,7 @@
#include <mach/kern_return.h>
#include <mach/std_types.h>
+#include <mach/mach_types.h>
extern kern_return_t vm_allocate(vm_map_t, vm_offset_t *, vm_size_t,
boolean_t);
@@ -46,6 +47,7 @@ extern kern_return_t vm_inherit(vm_map_t, vm_offset_t, vm_size_t,
extern kern_return_t vm_protect(vm_map_t, vm_offset_t, vm_size_t, boolean_t,
vm_prot_t);
extern kern_return_t vm_statistics(vm_map_t, vm_statistics_data_t *);
+extern kern_return_t vm_cache_statistics(vm_map_t, vm_cache_statistics_data_t *);
extern kern_return_t vm_read(vm_map_t, vm_address_t, vm_size_t, pointer_t *,
vm_size_t *);
extern kern_return_t vm_write(vm_map_t, vm_address_t, pointer_t, vm_size_t);
diff --git a/xen/block.c b/xen/block.c
index 4718891d..4253ef04 100644
--- a/xen/block.c
+++ b/xen/block.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2009, 2011 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009, 2011 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,6 +18,7 @@
#include <sys/types.h>
#include <mach/mig_errors.h>
+#include <kern/kalloc.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <vm/vm_kern.h>
@@ -229,12 +230,12 @@ void hyp_block_init(void) {
t = hyp_store_transaction_start();
/* Get a page for ring */
- if (kmem_alloc_wired(kernel_map, &addr, PAGE_SIZE) != KERN_SUCCESS)
+ if ((addr = vm_page_grab_phys_addr()) == -1)
panic("%s: couldn't allocate space for store ring\n", device_name);
- ring = (void*) addr;
+ ring = (void*) phystokv(addr);
SHARED_RING_INIT(ring);
FRONT_RING_INIT(&bd->ring, ring, PAGE_SIZE);
- grant = hyp_grant_give(domid, atop(kvtophys(addr)), 0);
+ grant = hyp_grant_give(domid, atop(addr), 0);
/* and give it to backend. */
i = sprintf(port_name, "%u", grant);
@@ -516,7 +517,7 @@ device_read (void *d, ipc_port_t reply_port,
thread_block(NULL);
if (err)
- printf("error reading %d bytes at sector %d\n", amt,
+ printf("error reading %d bytes at sector %ld\n", amt,
bn + offset / 512);
for (i = 0; i < nbpages; i++)
diff --git a/xen/block.h b/xen/block.h
index 5955968a..82488855 100644
--- a/xen/block.h
+++ b/xen/block.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/xen/configfrag.ac b/xen/configfrag.ac
index eb689960..3745a31f 100644
--- a/xen/configfrag.ac
+++ b/xen/configfrag.ac
@@ -25,8 +25,10 @@ dnl 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
AC_DEFINE([MACH_HYP], [], [be a hypervisor guest])
AM_CONDITIONAL([PLATFORM_xen], [true])
+dnl These are experimental
+
AC_ARG_ENABLE([pseudo-phys],
- AS_HELP_STRING([--enable-pseudo-phys], [Pseudo physical support]))
+ AS_HELP_STRING([--disable-pseudo-phys], [Pseudo physical pages support]))
[if [ x"$enable_pseudo_phys" = xno ]; then]
AM_CONDITIONAL([enable_pseudo_phys], [false])
[else]
@@ -34,9 +36,39 @@ dnl 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
AM_CONDITIONAL([enable_pseudo_phys], [true])
[fi]
+ AC_ARG_ENABLE([pv-pagetables],
+ AS_HELP_STRING([--disable-pv-pagetables], [Paravirtualized page tables support]))
+ [if [ x"$enable_pv_pagetables" = xno ]; then]
+ AM_CONDITIONAL([enable_pv_pagetables], [false])
+ [else]
+ AC_DEFINE([MACH_PV_PAGETABLES], [], [Enable paravirtualized page tables support])
+ AM_CONDITIONAL([enable_pv_pagetables], [true])
+ [fi]
+
+ AC_ARG_ENABLE([pv-descriptors],
+ AS_HELP_STRING([--disable-pv-descriptors], [Paravirtualized segment descriptors support]))
+ [if [ x"$enable_pv_descriptors" = xno ]; then]
+ AM_CONDITIONAL([enable_pv_descriptors], [false])
+ [else]
+ AC_DEFINE([MACH_PV_DESCRIPTORS], [], [Enable paravirtualized segment descriptors support])
+ AM_CONDITIONAL([enable_pv_descriptors], [true])
+ [fi]
+
+ AC_ARG_ENABLE([ring1],
+ AS_HELP_STRING([--disable-ring1], [ring1 kernel support]))
+ [if [ x"$enable_ring1" = xno ]; then]
+ AM_CONDITIONAL([enable_ring1], [false])
+ [else]
+ AC_DEFINE([MACH_RING1], [], [Enable ring1 kernel support])
+ AM_CONDITIONAL([enable_ring1], [true])
+ [fi]
+
[else]
AM_CONDITIONAL([PLATFORM_xen], [false])
AM_CONDITIONAL([enable_pseudo_phys], [false])
+ AM_CONDITIONAL([enable_pv_pagetables], [false])
+ AM_CONDITIONAL([enable_pv_descriptors], [false])
+ AM_CONDITIONAL([enable_ring1], [false])
[fi]
dnl Local Variables:
diff --git a/xen/console.c b/xen/console.c
index 9798ec0a..884376ff 100644
--- a/xen/console.c
+++ b/xen/console.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2011 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -47,9 +47,13 @@ int hypputc(int c)
hyp_console_io(CONSOLEIO_write, 1, kvtolin(&d));
} else {
spl_t spl = splhigh();
+			int complain = 0;
simple_lock(&outlock);
while (hyp_ring_smash(console->out, console->out_prod, console->out_cons)) {
- hyp_console_put("ring smash\n");
+ if (!complain) {
+ complain = 1;
+ hyp_console_put("ring smash\n");
+ }
/* TODO: are we allowed to sleep in putc? */
hyp_yield();
}
@@ -216,9 +220,6 @@ int hypcnclose(int dev, int flag)
int hypcnprobe(struct consdev *cp)
{
- struct xencons_interface *my_console;
- my_console = (void*) mfn_to_kv(boot_info.console_mfn);
-
cp->cn_dev = makedev(0, 0);
cp->cn_pri = CN_INTERNAL;
return 0;
@@ -231,7 +232,9 @@ int hypcninit(struct consdev *cp)
simple_lock_init(&outlock);
simple_lock_init(&inlock);
console = (void*) mfn_to_kv(boot_info.console_mfn);
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readwrite(console);
+#endif /* MACH_PV_PAGETABLES */
hyp_evt_handler(boot_info.console_evtchn, hypcnintr, 0, SPL6);
return 0;
}
diff --git a/xen/console.h b/xen/console.h
index fa13dc0f..ad171a47 100644
--- a/xen/console.h
+++ b/xen/console.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009, 2011 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,6 +21,8 @@
#include <machine/xen.h>
#include <string.h>
+#include <device/cons.h>
+
#define hyp_console_write(str, len) hyp_console_io (CONSOLEIO_write, (len), kvtolin(str))
#define hyp_console_put(str) ({ \
@@ -30,4 +32,9 @@
extern void hyp_console_init(void);
+extern int hypcnputc(dev_t dev, int c);
+extern int hypcngetc(dev_t dev, int wait);
+extern int hypcnprobe(struct consdev *cp);
+extern int hypcninit(struct consdev *cp);
+
#endif /* XEN_CONSOLE_H */
diff --git a/xen/evt.c b/xen/evt.c
index 345e1d06..c62e1d59 100644
--- a/xen/evt.c
+++ b/xen/evt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2007-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -62,12 +62,12 @@ void hyp_c_callback(void *ret_addr, void *regs)
if (ivect[n]) {
spl_t spl = splx(intpri[n]);
- asm ("lock; andl %1,%0":"=m"(hyp_shared_info.evtchn_pending[i]):"r"(~(1<<j)));
+ asm ("lock; and %1,%0":"=m"(hyp_shared_info.evtchn_pending[i]):"r"(~(1UL<<j)));
ivect[n](iunit[n], spl, ret_addr, regs);
splx_cli(spl);
} else {
printf("warning: lost unbound event %d\n", n);
- asm ("lock; andl %1,%0":"=m"(hyp_shared_info.evtchn_pending[i]):"r"(~(1<<j)));
+ asm ("lock; and %1,%0":"=m"(hyp_shared_info.evtchn_pending[i]):"r"(~(1UL<<j)));
}
}
}
diff --git a/xen/evt.h b/xen/evt.h
index a5839776..e4dbad1b 100644
--- a/xen/evt.h
+++ b/xen/evt.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/xen/grant.c b/xen/grant.c
index 505d2026..3d5c3fe7 100644
--- a/xen/grant.c
+++ b/xen/grant.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/xen/grant.h b/xen/grant.h
index ff8617d4..30ce344d 100644
--- a/xen/grant.h
+++ b/xen/grant.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/xen/net.c b/xen/net.c
index dd903eda..10e4bbef 100644
--- a/xen/net.c
+++ b/xen/net.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2009, 2011 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009, 2011 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,6 +18,7 @@
#include <sys/types.h>
#include <mach/mig_errors.h>
+#include <kern/kalloc.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <vm/vm_kern.h>
@@ -117,6 +118,61 @@ static void enqueue_rx_buf(struct net_data *nd, int number) {
req->gref = nd->rx_buf_gnt[number] = gref;
}
+static int recompute_checksum(void *data, int len) {
+ unsigned16_t *header16 = data;
+ unsigned8_t *header8 = data;
+ unsigned length, i;
+ unsigned32_t checksum = 0;
+
+ /* IPv4 header length */
+ length = (header8[0] & 0xf) * 4;
+ if (length < 20)
+ /* Too small for an IP header */
+ return -1;
+ if (length > len)
+ /* Does not fit in the ethernet frame */
+ return -1;
+
+ /* Compute IP header checksum */
+ header16[5] = 0;
+ for (i = 0; i < length/2; i++)
+ checksum += ntohs(header16[i]);
+
+ while (checksum >> 16)
+ checksum = (checksum & 0xffff) + (checksum >> 16);
+
+ header16[5] = htons(~checksum);
+
+ if (header8[9] == 6) {
+ /* Need to fix TCP checksum as well */
+ unsigned16_t *tcp_header16 = header16 + length/2;
+ unsigned8_t *tcp_header8 = header8 + length;
+ unsigned tcp_length = ntohs(header16[1]) - length;
+
+ /* Pseudo IP header */
+ checksum = ntohs(header16[6]) + ntohs(header16[7]) +
+ ntohs(header16[8]) + ntohs(header16[9]) +
+ header8[9] + tcp_length;
+
+ tcp_header16[8] = 0;
+ for (i = 0; i < tcp_length / 2; i++)
+ checksum += ntohs(tcp_header16[i]);
+ if (tcp_length & 1)
+ checksum += tcp_header8[tcp_length-1] << 8;
+
+ while (checksum >> 16)
+ checksum = (checksum & 0xffff) + (checksum >> 16);
+
+ tcp_header16[8] = htons(~checksum);
+ } else if (header8[9] == 17) {
+ /* Drop any bogus checksum */
+ unsigned16_t *udp_header16 = header16 + length/2;
+ udp_header16[3] = 0;
+ }
+
+ return 0;
+}
+
static void hyp_net_intr(int unit) {
ipc_kmsg_t kmsg;
struct ether_header *eh;
@@ -172,6 +228,25 @@ static void hyp_net_intr(int unit) {
data = nd->rx_buf[number] + rx_rsp->offset;
len = rx_rsp->status;
+ if (rx_rsp->flags & NETRXF_csum_blank) {
+ struct ether_header *ether = data;
+
+ if (!(rx_rsp->flags & NETRXF_data_validated)) {
+ printf("packet with no checksum and not validated, dropping it\n");
+ goto drop_kmsg;
+ }
+
+ /* TODO: rather tell pfinet to ignore checksums */
+
+ if (ntohs(ether->ether_type) != 0x0800) {
+ printf("packet with no checksum and not IPv4, dropping it\n");
+ goto drop_kmsg;
+ }
+
+ if (recompute_checksum(data + sizeof(*ether), len - sizeof(*ether)))
+ goto drop_kmsg;
+ }
+
eh = (void*) (net_kmsg(kmsg)->header);
ph = (void*) (net_kmsg(kmsg)->packet);
memcpy(eh, data, sizeof (struct ether_header));
@@ -283,12 +358,12 @@ void hyp_net_init(void) {
t = hyp_store_transaction_start();
/* Get a page for tx_ring */
- if (kmem_alloc_wired(kernel_map, &addr, PAGE_SIZE) != KERN_SUCCESS)
+ if ((addr = vm_page_grab_phys_addr()) == -1)
panic("eth: couldn't allocate space for store tx_ring");
- tx_ring = (void*) addr;
+ tx_ring = (void*) phystokv(addr);
SHARED_RING_INIT(tx_ring);
FRONT_RING_INIT(&nd->tx, tx_ring, PAGE_SIZE);
- grant = hyp_grant_give(domid, atop(kvtophys(addr)), 0);
+ grant = hyp_grant_give(domid, atop(addr), 0);
/* and give it to backend. */
i = sprintf(port_name, "%u", grant);
@@ -298,12 +373,12 @@ void hyp_net_init(void) {
kfree((vm_offset_t) c, strlen(c)+1);
/* Get a page for rx_ring */
- if (kmem_alloc_wired(kernel_map, &addr, PAGE_SIZE) != KERN_SUCCESS)
+ if ((addr = vm_page_grab_phys_addr()) == -1)
panic("eth: couldn't allocate space for store tx_ring");
- rx_ring = (void*) addr;
+ rx_ring = (void*) phystokv(addr);
SHARED_RING_INIT(rx_ring);
FRONT_RING_INIT(&nd->rx, rx_ring, PAGE_SIZE);
- grant = hyp_grant_give(domid, atop(kvtophys(addr)), 0);
+ grant = hyp_grant_give(domid, atop(addr), 0);
/* and give it to backend. */
i = sprintf(port_name, "%u", grant);
@@ -396,12 +471,12 @@ void hyp_net_init(void) {
/* Get a page for packet reception */
for (i= 0; i<WINDOW; i++) {
- if (kmem_alloc_wired(kernel_map, &addr, PAGE_SIZE) != KERN_SUCCESS)
+ if ((addr = vm_page_grab_phys_addr()) == -1)
panic("eth: couldn't allocate space for store tx_ring");
- nd->rx_buf[i] = (void*)phystokv(kvtophys(addr));
- nd->rx_buf_pfn[i] = atop(kvtophys((vm_offset_t)nd->rx_buf[i]));
+ nd->rx_buf[i] = (void*)phystokv(addr);
+ nd->rx_buf_pfn[i] = atop(addr);
if (!nd->rx_copy) {
- if (hyp_do_update_va_mapping(kvtolin(addr), 0, UVMF_INVLPG|UVMF_ALL))
+ if (hyp_do_update_va_mapping(kvtolin(nd->rx_buf[i]), 0, UVMF_INVLPG|UVMF_ALL))
panic("eth: couldn't clear rx kv buf %d at %p", i, addr);
}
/* and enqueue it to backend. */
diff --git a/xen/net.h b/xen/net.h
index 66838700..836e75f2 100644
--- a/xen/net.h
+++ b/xen/net.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/xen/ring.c b/xen/ring.c
index 8644059e..91ce3ff1 100644
--- a/xen/ring.c
+++ b/xen/ring.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/xen/ring.h b/xen/ring.h
index 6ed00acd..c5c2fe39 100644
--- a/xen/ring.h
+++ b/xen/ring.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/xen/store.c b/xen/store.c
index 3c6baebf..739dc367 100644
--- a/xen/store.c
+++ b/xen/store.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,6 +20,7 @@
#include <mach/mig_support.h>
#include <machine/pmap.h>
#include <machine/ipl.h>
+#include <kern/kalloc.h>
#include <stdarg.h>
#include <string.h>
#include <alloca.h>
@@ -328,7 +329,9 @@ void hyp_store_init(void)
return;
simple_lock_init(&lock);
store = (void*) mfn_to_kv(boot_info.store_mfn);
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readwrite(store);
+#endif /* MACH_PV_PAGETABLES */
/* SPL sched */
hyp_evt_handler(boot_info.store_evtchn, hyp_store_handler, 0, SPL7);
}
diff --git a/xen/store.h b/xen/store.h
index 4b3ee187..ae236eb6 100644
--- a/xen/store.h
+++ b/xen/store.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/xen/time.c b/xen/time.c
index 4c5cc351..a11e7eb4 100644
--- a/xen/time.c
+++ b/xen/time.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -116,11 +116,6 @@ readtodc(tp)
unsigned64_t t = hyp_get_time();
u_int n = t / 1000000000;
-#ifndef MACH_KERNEL
- n += tz.tz_minuteswest * 60;
- if (tz.tz_dsttime)
- n -= 3600;
-#endif /* MACH_KERNEL */
*tp = n;
return(0);
diff --git a/xen/time.h b/xen/time.h
index f8755884..8c8bc53e 100644
--- a/xen/time.h
+++ b/xen/time.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/xen/xen.c b/xen/xen.c
index f8f964eb..28953512 100644
--- a/xen/xen.c
+++ b/xen/xen.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2011 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2007-2011 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,13 +30,6 @@
#include "xen.h"
#include "evt.h"
-void hyp_invalidate_pte(pt_entry_t *pte)
-{
- if (!hyp_mmu_update_pte(kv_to_ma(pte), (*pte) & ~INTEL_PTE_VALID))
- panic("%s:%d could not set pte %p(%p) to %p(%p)\n",__FILE__,__LINE__,pte,(vm_offset_t) kv_to_ma(pte),*pte,ma_to_pa(*pte));
- hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL);
-}
-
void hyp_debug()
{
panic("debug");
diff --git a/xen/xen.h b/xen/xen.h
index 6eeb350e..f0a3abb9 100644
--- a/xen/xen.h
+++ b/xen/xen.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010, 2011 Samuel Thibault <samuel.thibault@ens-lyon.org>
+ * Copyright (C) 2006-2009, 2011 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,7 +21,6 @@
void hyp_init(void);
void hyp_dev_init(void);
-void hyp_invalidate_pte(pt_entry_t *pte);
void hyp_idle(void);
void hyp_p2m_init(void);