summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSamuel Thibault <samuel.thibault@ens-lyon.org>2019-11-11 23:56:11 +0100
committerSamuel Thibault <samuel.thibault@ens-lyon.org>2019-11-11 23:56:11 +0100
commite40b004af55fd7fb243c06361143e2ec2d2c87e2 (patch)
treeeaf3e3b1f10e5316f29287c122036a370ac23f84
parent799d2085cf4668de379f17ee29f1651deb744c04 (diff)
parentb32a359a60822f0190ecfde8386af720a750bdc3 (diff)
Merge branch 'master-user_level_drivers' into master-user_level_drivers-debian
-rw-r--r--COPYING3674
-rw-r--r--Makefile.am15
-rw-r--r--Makefile.in.dep.patch19
-rw-r--r--Makefrag.am4
-rw-r--r--configfrag.ac3
-rw-r--r--configure.ac6
-rw-r--r--ddb/db_access.c3
-rw-r--r--ddb/db_command.c47
-rw-r--r--ddb/db_examine.c5
-rw-r--r--ddb/db_print.c43
-rw-r--r--ddb/db_task_thread.c25
-rw-r--r--ddb/db_task_thread.h6
-rw-r--r--ddb/db_variables.c2
-rw-r--r--device/blkio.c2
-rw-r--r--device/blkio.h2
-rw-r--r--device/conf.h13
-rw-r--r--device/dev_forward.defs44
-rw-r--r--device/dev_hdr.h2
-rw-r--r--device/dev_name.c8
-rw-r--r--device/dev_pager.c1
-rw-r--r--device/kmsg.c2
-rw-r--r--device/kmsg.h4
-rw-r--r--device/net_io.c2
-rw-r--r--device/net_io.h2
-rw-r--r--device/tty.h4
-rw-r--r--doc/mach.texi23
-rw-r--r--i386/i386/db_interface.c12
-rw-r--r--i386/i386/db_interface.h4
-rw-r--r--i386/i386/io_perm.c27
-rw-r--r--i386/i386/io_perm.h2
-rw-r--r--i386/i386/ipl.h1
-rw-r--r--i386/i386/lock.h2
-rw-r--r--i386/i386/mp_desc.h3
-rw-r--r--i386/i386/pcb.h1
-rw-r--r--i386/i386/pic.c106
-rw-r--r--i386/i386/pic.h15
-rw-r--r--i386/i386/pit.c6
-rw-r--r--i386/i386/sched_param.h4
-rw-r--r--i386/i386/spl.S90
-rw-r--r--i386/i386/spl.h1
-rw-r--r--i386/i386/thread.h1
-rw-r--r--i386/i386/trap.c10
-rw-r--r--i386/i386at/autoconf.c12
-rw-r--r--i386/i386at/com.c12
-rw-r--r--i386/i386at/com.h12
-rw-r--r--i386/i386at/interrupt.S47
-rw-r--r--i386/i386at/kd.c14
-rw-r--r--i386/i386at/kd.h14
-rw-r--r--i386/i386at/kd_event.c12
-rw-r--r--i386/i386at/kd_event.h12
-rw-r--r--i386/i386at/kd_mouse.c12
-rw-r--r--i386/i386at/kd_mouse.h6
-rw-r--r--i386/i386at/lpr.c14
-rw-r--r--i386/i386at/lpr.h12
-rw-r--r--i386/i386at/mem.c2
-rw-r--r--i386/i386at/mem.h2
-rw-r--r--i386/i386at/model_dep.c18
-rw-r--r--i386/i386at/model_dep.h2
-rw-r--r--i386/i386at/pic_isa.c9
-rw-r--r--i386/i386at/rtc.c31
-rw-r--r--i386/include/mach/i386/mach_i386.defs2
-rw-r--r--i386/include/mach/i386/mach_i386_types.h1
-rw-r--r--i386/intel/pmap.c70
-rw-r--r--i386/intel/pmap.h6
-rw-r--r--include/device/bpf.h6
-rw-r--r--include/mach/gnumach.defs14
-rw-r--r--include/mach/mach_types.defs1
-rw-r--r--include/mach/mach_types.h1
-rw-r--r--include/mach/memory_object.defs2
-rw-r--r--include/mach/port.h1
-rw-r--r--include/mach/thread_info.h1
-rw-r--r--include/mach/vm_sync.h45
-rw-r--r--ipc/ipc_kmsg.h10
-rw-r--r--ipc/mach_port.c16
-rw-r--r--ipc/mach_port.h4
-rw-r--r--kern/ast.c2
-rw-r--r--kern/ast.h5
-rw-r--r--kern/atomic.h54
-rw-r--r--kern/bootstrap.c10
-rw-r--r--kern/cpu_number.h3
-rw-r--r--kern/gsync.c368
-rw-r--r--kern/host.c5
-rw-r--r--kern/kmutex.c76
-rw-r--r--kern/kmutex.h52
-rw-r--r--kern/machine.h1
-rw-r--r--kern/profile.c6
-rw-r--r--kern/sched.h16
-rw-r--r--kern/sched_prim.c38
-rw-r--r--kern/sched_prim.h2
-rw-r--r--kern/task.c24
-rw-r--r--kern/task.h4
-rw-r--r--kern/thread.c36
-rw-r--r--linux/Makefrag.am2
-rw-r--r--linux/configfrag.ac27
-rw-r--r--linux/dev/arch/i386/kernel/irq.c145
-rw-r--r--linux/dev/drivers/block/ahci.c12
-rw-r--r--linux/dev/drivers/block/genhd.c6
-rw-r--r--linux/dev/glue/block.c15
-rw-r--r--linux/dev/glue/glue.h2
-rw-r--r--linux/dev/glue/net.c4
-rw-r--r--linux/dev/include/asm-i386/system.h8
-rw-r--r--linux/dev/init/main.c1
-rw-r--r--linux/dev/kernel/sched.c2
-rw-r--r--linux/pcmcia-cs/modules/ds.c2
-rw-r--r--linux/src/drivers/block/ide.c3
-rw-r--r--linux/src/drivers/block/triton.c8
-rw-r--r--linux/src/drivers/scsi/NCR53c406a.c2
-rw-r--r--linux/src/include/linux/compiler-gcc.h4
-rw-r--r--linux/src/include/linux/compiler-gcc6.h67
-rw-r--r--vm/vm_fault.c2
-rw-r--r--vm/vm_map.c47
-rw-r--r--vm/vm_map.h4
-rw-r--r--vm/vm_object.c7
-rw-r--r--vm/vm_object.h14
-rw-r--r--vm/vm_user.c52
-rw-r--r--xen/console.c4
-rw-r--r--xen/console.h4
117 files changed, 2007 insertions, 808 deletions
diff --git a/COPYING3 b/COPYING3
new file mode 100644
index 00000000..f288702d
--- /dev/null
+++ b/COPYING3
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/Makefile.am b/Makefile.am
index 67252db8..11312a1e 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -54,6 +54,9 @@ AM_CPPFLAGS += \
AM_CFLAGS += \
-fno-builtin-log
+AM_CCASFLAGS += \
+ -D__ASSEMBLY__
+
# Yes, this makes the eyes hurt. But perhaps someone will finally take care of
# all that scruffy Mach code... Also see <http://savannah.gnu.org/task/?5726>.
AM_CFLAGS += \
@@ -74,6 +77,10 @@ if disable_smashing_stack_protector
AM_CFLAGS += \
-fno-stack-protector
endif
+
+# We do not support or need position-independent
+AM_CFLAGS += \
+ -no-pie -fno-pic
#
# Silent build support.
@@ -162,9 +169,10 @@ noinst_PROGRAMS += \
# This is the list of routines we decide is OK to steal from the C library.
clib_routines := htonl htons ntohl ntohs \
- udivdi3 __udivdi3 __umoddi3 \
+ udivdi3 __udivdi3 __udivmoddi4 __umoddi3 \
+ __divdi3 \
__rel_iplt_start __rel_iplt_end \
- __ffsdi2 \
+ __ffsdi2 ffs \
_START _start etext _edata end _end # actually ld magic, not libc.
gnumach-undef: gnumach.$(OBJEXT)
$(NM_V) $(NM) -u $< | sed 's/ *U *//' | sort -u > $@
@@ -198,7 +206,8 @@ exec_boot_PROGRAMS = \
#
EXTRA_DIST += \
- config.status.dep.patch
+ config.status.dep.patch \
+ Makefile.in.dep.patch
EXTRA_DIST += \
DEVELOPMENT
diff --git a/Makefile.in.dep.patch b/Makefile.in.dep.patch
new file mode 100644
index 00000000..72fb65f6
--- /dev/null
+++ b/Makefile.in.dep.patch
@@ -0,0 +1,19 @@
+--- Makefile.in
++++ Makefile.in
+@@ -4785,7 +4785,15 @@ distclean-compile:
+
+ $(am__depfiles_remade):
+ @$(MKDIR_P) $(@D)
+- @echo '# dummy' >$@-t && $(am__mv) $@-t $@
++ # Ugly bootstrap hack to get to-be-generated files created
++ # Try to guess what file this dependency file is from...
++ @f=$(srcdir)/`dirname "$(@D)"`/`basename "$@" .Po | sed s/lib[^-]\*-//` ; \
++ for f in "$$f"*; do \
++ case $$f in \
++ *.c | *.S) echo "$$f"': $$(filter-out $$(DIST_SOURCES),$$(SOURCES))' ;; \
++ *) echo '# dummy';; \
++ esac ; \
++ done >$@-t && $(am__mv) $@-t $@
+
+ am--depfiles: $(am__depfiles_remade)
+
diff --git a/Makefrag.am b/Makefrag.am
index a44b1a65..fee77894 100644
--- a/Makefrag.am
+++ b/Makefrag.am
@@ -132,6 +132,7 @@ libkernel_a_SOURCES += \
kern/assert.h \
kern/ast.c \
kern/ast.h \
+ kern/atomic.h \
kern/boot_script.h \
kern/bootstrap.c \
kern/bootstrap.h \
@@ -160,6 +161,8 @@ libkernel_a_SOURCES += \
kern/ipc_tt.h \
kern/kalloc.h \
kern/kern_types.h \
+ kern/kmutex.c \
+ kern/kmutex.h \
kern/list.h \
kern/lock.c \
kern/lock.h \
@@ -422,6 +425,7 @@ include_mach_HEADERS = \
include/mach/vm_param.h \
include/mach/vm_prot.h \
include/mach/vm_statistics.h \
+ include/mach/vm_sync.h \
include/mach/vm_wire.h \
include/mach/inline.h \
include/mach/xen.h
diff --git a/configfrag.ac b/configfrag.ac
index 3d7033ec..73c23ffb 100644
--- a/configfrag.ac
+++ b/configfrag.ac
@@ -103,9 +103,6 @@ AC_DEFINE([MACH_VM_DEBUG], [1], [MACH_VM_DEBUG])
# Mach-dep power conservation.
AC_DEFINE([POWER_SAVE], [1], [POWER_SAVE])
-# No hardware clock rollover.
-AC_DEFINE([SIMPLE_CLOCK], [0], [SIMPLE_CLOCK])
-
# Use statistical timing.
AC_DEFINE([STAT_TIME], [1], [STAT_TIME])
diff --git a/configure.ac b/configure.ac
index 40e78a04..d4daa932 100644
--- a/configure.ac
+++ b/configure.ac
@@ -25,7 +25,7 @@ AC_CONFIG_AUX_DIR([build-aux])
AM_INIT_AUTOMAKE(
[1.10.2]
- [dist-bzip2]
+ [dist-xz]
dnl Don't define `PACKAGE' and `VERSION'.
[no-define]
dnl Do not clutter the main build directory.
@@ -216,7 +216,9 @@ dnl 's%#\ dummy%Makefile: $(filter-out $(DIST_SOURCES),$(SOURCES))%' \
dnl config.status
dnl ])
AC_CONFIG_COMMANDS_POST([
- if "$PATCH" -f < "$srcdir"/config.status.dep.patch
+ if "$PATCH" -f < "$srcdir"/config.status.dep.patch > /dev/null 2>&1 ||
+ ( cd "$srcdir" && "$PATCH" -f < Makefile.in.dep.patch ||
+ grep "Ugly bootstrap hack to get to-be-generated files created" Makefile.in ) > /dev/null 2>&1
then] AC_MSG_NOTICE([Applied a patch to work around a deficiency in]
[Automake. See `configure.ac' for details.])
[else] AC_MSG_ERROR([failed to patch using `config.status.dep.patch'.]
diff --git a/ddb/db_access.c b/ddb/db_access.c
index 16d4d3ef..509c1ba4 100644
--- a/ddb/db_access.c
+++ b/ddb/db_access.c
@@ -72,7 +72,8 @@ db_get_task_value(
db_expr_t value;
int i;
- db_read_bytes(addr, size, data, task);
+ if (!db_read_bytes(addr, size, data, task))
+ return 0;
value = 0;
#if BYTE_MSF
diff --git a/ddb/db_command.c b/ddb/db_command.c
index 721f04fe..c9538c6a 100644
--- a/ddb/db_command.c
+++ b/ddb/db_command.c
@@ -63,6 +63,7 @@
#include <vm/vm_print.h>
#include <ipc/ipc_print.h>
+#include <ipc/mach_port.h>
#include <kern/lock.h>
/*
@@ -332,6 +333,23 @@ struct db_command db_show_cmds[] = {
{ (char *)0, }
};
+void
+db_debug_all_traps_cmd(db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char *modif);
+void
+db_debug_port_references_cmd(db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char *modif);
+
+struct db_command db_debug_cmds[] = {
+ { "traps", db_debug_all_traps_cmd, 0, 0 },
+ { "references", db_debug_port_references_cmd, 0, 0 },
+ { (char *)0, }
+};
+
struct db_command db_command_table[] = {
#ifdef DB_MACHINE_COMMANDS
/* this must be the first entry, if it exists */
@@ -364,6 +382,7 @@ struct db_command db_command_table[] = {
{ "macro", db_def_macro_cmd, CS_OWN, 0 },
{ "dmacro", db_del_macro_cmd, CS_OWN, 0 },
{ "show", 0, 0, db_show_cmds },
+ { "debug", 0, 0, db_debug_cmds },
{ "reset", db_reset_cpu, 0, 0 },
{ "reboot", db_reset_cpu, 0, 0 },
{ "halt", db_halt_cpu, 0, 0 },
@@ -538,4 +557,32 @@ db_option(modif, option)
return(FALSE);
}
+void
+db_debug_all_traps_cmd(db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char *modif)
+{
+ if (strcmp (modif, "on") == 0)
+ db_debug_all_traps (TRUE);
+ else if (strcmp (modif, "off") == 0)
+ db_debug_all_traps (FALSE);
+ else
+ db_error ("debug traps /on|/off\n");
+}
+
+void
+db_debug_port_references_cmd(db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char *modif)
+{
+ if (strcmp (modif, "on") == 0)
+ db_debug_port_references (TRUE);
+ else if (strcmp (modif, "off") == 0)
+ db_debug_port_references (FALSE);
+ else
+ db_error ("debug references /on|/off\n");
+}
+
#endif /* MACH_KDB */
diff --git a/ddb/db_examine.c b/ddb/db_examine.c
index 836b0e89..6f94b686 100644
--- a/ddb/db_examine.c
+++ b/ddb/db_examine.c
@@ -473,7 +473,10 @@ db_xcdump(
if (!DB_CHECK_ACCESS(next_page_addr, sizeof(int), task))
bcount = next_page_addr - addr;
}
- db_read_bytes(addr, bcount, data, task);
+ if (!db_read_bytes(addr, bcount, data, task)) {
+ db_printf("*\n");
+ continue;
+ }
for (i = 0; i < bcount && off != 0; i += size) {
if (i % 4 == 0)
db_printf(" ");
diff --git a/ddb/db_print.c b/ddb/db_print.c
index fb4efaad..832faf57 100644
--- a/ddb/db_print.c
+++ b/ddb/db_print.c
@@ -34,6 +34,7 @@
* Miscellaneous printing.
*/
#include <string.h>
+#include <mach/policy.h>
#include <mach/port.h>
#include <kern/task.h>
#include <kern/thread.h>
@@ -112,6 +113,7 @@ db_show_regs(
#define OPTION_LONG 0x001 /* long print option */
#define OPTION_USER 0x002 /* print ps-like stuff */
+#define OPTION_SCHED 0x004 /* print scheduling info */
#define OPTION_INDENT 0x100 /* print with indent */
#define OPTION_THREAD_TITLE 0x200 /* print thread title */
#define OPTION_TASK_TITLE 0x400 /* print thread title */
@@ -152,10 +154,10 @@ db_print_thread(
if (flag & OPTION_USER) {
char status[8];
char *indent = "";
+ if (flag & OPTION_INDENT)
+ indent = " ";
if (flag & OPTION_LONG) {
- if (flag & OPTION_INDENT)
- indent = " ";
if (flag & OPTION_THREAD_TITLE) {
db_printf("%s ID: THREAD STAT STACK PCB", indent);
db_printf(" SUS PRI CONTINUE,WAIT_FUNC\n");
@@ -177,6 +179,33 @@ db_print_thread(
db_task_printsym((db_addr_t)thread->wait_event,
DB_STGY_ANY, kernel_task);
db_printf("\n");
+ } else if (flag & OPTION_SCHED) {
+ if (flag & OPTION_THREAD_TITLE) {
+ db_printf("%s "
+ "STAT PRIORITY POLICY USAGE LAST\n",
+ indent);
+ db_printf("%s ID: "
+ "RWSONF SET MAX COMP DEPR P DATA CPU SCHED UPDATED\n",
+ indent);
+ db_printf(" \n");
+ }
+ db_printf("%s%3d%c %s %4d %4d %4d %4d %c %4d %10d %10d %10d\n",
+ indent, thread_id,
+ (thread == current_thread())? '#': ':',
+ db_thread_stat(thread, status),
+ thread->priority,
+ thread->max_priority,
+ thread->sched_pri,
+ thread->depress_priority,
+#if MACH_FIXPRI
+ thread->policy == POLICY_TIMESHARE ? 'T' : 'F',
+ thread->sched_data,
+#else /* MACH_FIXPRI */
+ 'T', 0,
+#endif /* MACH_FIXPRI */
+ thread->cpu_usage,
+ thread->sched_usage,
+ thread->sched_stamp);
} else {
if (thread_id % 3 == 0) {
if (flag & OPTION_INDENT)
@@ -228,7 +257,7 @@ db_print_task(
if (flag & OPTION_TASK_TITLE) {
db_printf(" ID: TASK MAP THD SUS PR %s",
DB_TASK_NAME_TITLE);
- if ((flag & OPTION_LONG) == 0)
+ if ((flag & (OPTION_LONG|OPTION_SCHED)) == 0)
db_printf(" THREADS");
db_printf("\n");
}
@@ -237,7 +266,7 @@ db_print_task(
2*sizeof(vm_offset_t), task->map, task->thread_count,
task->suspend_count, task->priority);
DB_TASK_NAME(task);
- if (flag & OPTION_LONG) {
+ if (flag & (OPTION_LONG|OPTION_SCHED)) {
if (flag & OPTION_TASK_TITLE)
flag |= OPTION_THREAD_TITLE;
db_printf("\n");
@@ -249,7 +278,7 @@ db_print_task(
flag &= ~OPTION_THREAD_TITLE;
thread_id++;
}
- if ((flag & OPTION_LONG) == 0)
+ if ((flag & (OPTION_LONG|OPTION_SCHED)) == 0)
db_printf("\n");
} else {
if (flag & OPTION_TASK_TITLE)
@@ -318,6 +347,8 @@ db_show_all_threads(addr, have_addr, count, modif)
flag |= OPTION_USER;
if (db_option(modif, 'l'))
flag |= OPTION_LONG;
+ if (db_option(modif, 's'))
+ flag |= OPTION_SCHED;
task_id = 0;
queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
@@ -368,6 +399,8 @@ db_show_one_thread(addr, have_addr, count, modif)
flag |= OPTION_USER;
if (db_option(modif, 'l'))
flag |= OPTION_LONG;
+ if (db_option(modif, 's'))
+ flag |= OPTION_SCHED;
if (!have_addr) {
thread = current_thread();
diff --git a/ddb/db_task_thread.c b/ddb/db_task_thread.c
index 7927e674..f7fbb805 100644
--- a/ddb/db_task_thread.c
+++ b/ddb/db_task_thread.c
@@ -303,4 +303,29 @@ db_get_task_thread(
return;
}
+/*
+ * convert $mapXXX type DDB variable to map address
+ */
+void
+db_get_map(struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ db_var_aux_param_t ap)
+{
+ task_t task;
+
+ if (flag != DB_VAR_GET) {
+ db_error("Cannot set to $map variable\n");
+ /* NOTREACHED */
+ }
+
+ if ((task = db_lookup_task_id(ap->suffix[0])) == TASK_NULL) {
+ db_printf("no such map($map%d)\n", ap->suffix[0]);
+ db_error(0);
+ /* NOTREACHED */
+ }
+
+ *valuep = (db_expr_t) task->map;
+}
+
#endif /* MACH_KDB */
diff --git a/ddb/db_task_thread.h b/ddb/db_task_thread.h
index cbb36802..55ab4f53 100644
--- a/ddb/db_task_thread.h
+++ b/ddb/db_task_thread.h
@@ -64,4 +64,10 @@ db_get_task_thread(
int flag,
db_var_aux_param_t ap);
+extern void
+db_get_map(struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ db_var_aux_param_t ap);
+
#endif /* _DDB_DB_TASK_THREAD_H_ */
diff --git a/ddb/db_variables.c b/ddb/db_variables.c
index 4442ccbc..0fd9bad0 100644
--- a/ddb/db_variables.c
+++ b/ddb/db_variables.c
@@ -61,6 +61,8 @@ struct db_variable db_vars[] = {
{ "thread", 0, db_set_default_thread },
{ "task", 0, db_get_task_thread,
1, 2, -1, -1 },
+ { "map", 0, db_get_map,
+ 1, 1, -1, -1 },
{ "work", &db_work[0], FCN_NULL,
1, 1, 0, DB_NWORK-1 },
{ "arg", 0, db_arg_variable,
diff --git a/device/blkio.c b/device/blkio.c
index e5b4d09f..7ec1f2cf 100644
--- a/device/blkio.c
+++ b/device/blkio.c
@@ -101,7 +101,7 @@ void minphys(io_req_t ior)
* Dummy routine placed in device switch entries to indicate that
* block device may be mapped.
*/
-int block_io_mmap(dev_t dev, vm_offset_t off, int prot)
+vm_offset_t block_io_mmap(dev_t dev, vm_offset_t off, int prot)
{
return (0);
}
diff --git a/device/blkio.h b/device/blkio.h
index 77eb105a..aaff9f8a 100644
--- a/device/blkio.h
+++ b/device/blkio.h
@@ -19,6 +19,6 @@
#ifndef _DEVICE_BLKIO_H_
#define _DEVICE_BLKIO_H_
-extern int block_io_mmap(dev_t dev, vm_offset_t off, int prot);
+extern vm_offset_t block_io_mmap(dev_t dev, vm_offset_t off, int prot);
#endif /* _DEVICE_BLKIO_H_ */
diff --git a/device/conf.h b/device/conf.h
index fea18223..1af00285 100644
--- a/device/conf.h
+++ b/device/conf.h
@@ -35,6 +35,7 @@
#include <sys/types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
+#include <device/device_types.h>
struct io_req;
typedef struct io_req *io_req_t;
@@ -50,9 +51,9 @@ struct dev_ops {
void (*d_close)(dev_t, int); /* close device */
int (*d_read)(dev_t, io_req_t); /* read */
int (*d_write)(dev_t, io_req_t); /* write */
- int (*d_getstat)(dev_t, int, int *, natural_t *); /* get status/control */
- int (*d_setstat)(dev_t, int, int *, natural_t); /* set status/control */
- int (*d_mmap)(dev_t, vm_offset_t, vm_prot_t); /* map memory */
+ int (*d_getstat)(dev_t, dev_flavor_t, dev_status_t, mach_msg_type_number_t *); /* get status/control */
+ int (*d_setstat)(dev_t, dev_flavor_t, dev_status_t, mach_msg_type_number_t); /* set status/control */
+ vm_offset_t (*d_mmap)(dev_t, vm_offset_t, vm_prot_t); /* map memory */
int (*d_async_in)(); /* asynchronous input setup */
int (*d_reset)(); /* reset device */
int (*d_port_death)(dev_t, mach_port_t);
@@ -71,11 +72,11 @@ extern int nulldev_open(dev_t dev, int flag, io_req_t ior);
extern void nulldev_close(dev_t dev, int flags);
extern int nulldev_read(dev_t dev, io_req_t ior);
extern int nulldev_write(dev_t dev, io_req_t ior);
-extern io_return_t nulldev_getstat(dev_t dev, int flavor, int *data, natural_t *count);
-extern io_return_t nulldev_setstat(dev_t dev, int flavor, int *data, natural_t count);
+extern io_return_t nulldev_getstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count);
+extern io_return_t nulldev_setstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t count);
extern io_return_t nulldev_portdeath(dev_t dev, mach_port_t port);
extern int nodev(void); /* no operation - error */
-extern int nomap(dev_t dev, vm_offset_t off, int prot); /* no operation - error */
+extern vm_offset_t nomap(dev_t dev, vm_offset_t off, int prot); /* no operation - error */
/*
* Flavor constants for d_dev_info routine
diff --git a/device/dev_forward.defs b/device/dev_forward.defs
deleted file mode 100644
index a237bb86..00000000
--- a/device/dev_forward.defs
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991,1990 Carnegie Mellon University
- * All Rights Reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- * Author: Joseph S. Barrera, Carnegie Mellon University
- * Date: 12/90
- */
-
-subsystem KernelUser dev_forward 2800;
-
-#include <mach/std_types.defs>
-#include <mach/mach_types.defs>
-#include <device/device_types.defs>
-
-type reply_port_t = MACH_MSG_TYPE_MOVE_SEND_ONCE | polymorphic
- ctype: mach_port_t;
-
-simpleroutine forward_device_open_send(
- master_port : mach_port_t;
- ureplyport reply_port : reply_port_t;
- mode : dev_mode_t;
- name : dev_name_t);
diff --git a/device/dev_hdr.h b/device/dev_hdr.h
index ff7d2ef5..ad98e0bb 100644
--- a/device/dev_hdr.h
+++ b/device/dev_hdr.h
@@ -61,7 +61,7 @@
#include <kern/lock.h>
#include <kern/queue.h>
-#include <device/conf.h>
+typedef struct dev_ops *dev_ops_t;
/* This structure is associated with each open device port.
The port representing the device points to this structure. */
diff --git a/device/dev_name.c b/device/dev_name.c
index 175e3890..f6133679 100644
--- a/device/dev_name.c
+++ b/device/dev_name.c
@@ -63,12 +63,12 @@ int nulldev_write(dev_t dev, io_req_t ior)
return (D_SUCCESS);
}
-io_return_t nulldev_getstat(dev_t dev, int flavor, int *data, natural_t *count)
+io_return_t nulldev_getstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count)
{
return (D_SUCCESS);
}
-io_return_t nulldev_setstat(dev_t dev, int flavor, int *data, natural_t count)
+io_return_t nulldev_setstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t count)
{
return (D_SUCCESS);
}
@@ -83,10 +83,10 @@ int nodev(void)
return (D_INVALID_OPERATION);
}
-int
+vm_offset_t
nomap(dev_t dev, vm_offset_t off, int prot)
{
- return (D_INVALID_OPERATION);
+ return -1;
}
/*
diff --git a/device/dev_pager.c b/device/dev_pager.c
index 7130229b..0680b5aa 100644
--- a/device/dev_pager.c
+++ b/device/dev_pager.c
@@ -57,6 +57,7 @@
#include <device/memory_object_reply.user.h>
#include <device/dev_pager.h>
#include <device/blkio.h>
+#include <device/conf.h>
/*
* The device pager routines are called directly from the message
diff --git a/device/kmsg.c b/device/kmsg.c
index c80775d9..e49eb3d3 100644
--- a/device/kmsg.c
+++ b/device/kmsg.c
@@ -195,7 +195,7 @@ kmsg_read_done (io_req_t ior)
}
io_return_t
-kmsggetstat (dev_t dev, int flavor, int *data, unsigned int *count)
+kmsggetstat (dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count)
{
switch (flavor)
{
diff --git a/device/kmsg.h b/device/kmsg.h
index 8d907f1b..00a35050 100644
--- a/device/kmsg.h
+++ b/device/kmsg.h
@@ -10,8 +10,8 @@
io_return_t kmsgopen (dev_t dev, int flag, io_req_t ior);
void kmsgclose (dev_t dev, int flag);
io_return_t kmsgread (dev_t dev, io_req_t ior);
-io_return_t kmsggetstat (dev_t dev, int flavor,
- int *data, unsigned int *count);
+io_return_t kmsggetstat (dev_t dev, dev_flavor_t flavor,
+ dev_status_t data, mach_msg_type_number_t *count);
void kmsg_putchar (int c);
diff --git a/device/net_io.c b/device/net_io.c
index 99af0b29..72b040a0 100644
--- a/device/net_io.c
+++ b/device/net_io.c
@@ -1374,7 +1374,7 @@ net_getstat(
struct ifnet *ifp,
dev_flavor_t flavor,
dev_status_t status, /* pointer to OUT array */
- natural_t *count) /* OUT */
+ mach_msg_type_number_t *count) /* OUT */
{
switch (flavor) {
case NET_STATUS:
diff --git a/device/net_io.h b/device/net_io.h
index d4e24d41..5baf0678 100644
--- a/device/net_io.h
+++ b/device/net_io.h
@@ -78,7 +78,7 @@ extern void net_ast(void);
extern void net_packet(struct ifnet *, ipc_kmsg_t, unsigned int, boolean_t);
extern void net_filter(ipc_kmsg_t, ipc_kmsg_queue_t);
extern io_return_t net_getstat(struct ifnet *, dev_flavor_t, dev_status_t,
- natural_t *);
+ mach_msg_type_number_t *);
extern io_return_t net_write(struct ifnet *, int (*)(), io_req_t);
/*
diff --git a/device/tty.h b/device/tty.h
index d7aa2add..ea6f4404 100644
--- a/device/tty.h
+++ b/device/tty.h
@@ -72,8 +72,8 @@ struct tty {
* Items beyond this point should be removed to device-specific
* extension structures.
*/
- io_return_t (*t_getstat)(dev_t, int, int *, natural_t *); /* routine to get status */
- io_return_t (*t_setstat)(dev_t, int, int *, natural_t); /* routine to set status */
+ io_return_t (*t_getstat)(dev_t, dev_flavor_t, dev_status_t, mach_msg_type_number_t *); /* routine to get status */
+ io_return_t (*t_setstat)(dev_t, dev_flavor_t, dev_status_t, mach_msg_type_number_t); /* routine to set status */
dev_ops_t t_tops; /* another device to possibly
push through */
};
diff --git a/doc/mach.texi b/doc/mach.texi
index 756731e6..2bd085b2 100644
--- a/doc/mach.texi
+++ b/doc/mach.texi
@@ -1,4 +1,5 @@
\input texinfo @c -*- Texinfo -*-
+@documentencoding ISO-8859-1
@setfilename mach.info
@settitle The GNU Mach Reference Manual
@setchapternewpage odd
@@ -7039,7 +7040,7 @@ will print the contents of a list starting from @code{xxx_list} by each
@item dmacro @var{name}
Delete the macro named @var{name}.
-@item show all threads[/ul]
+@item show all threads[/uls]
Display all tasks and threads information. This version of @code{ddb}
prints more information than previous one. It shows UNIX process
information like @command{ps} for each task. The UNIX process
@@ -7056,7 +7057,12 @@ thread. The status consists of 6 letters, R(run), W(wait), S(suspended),
O(swapped out), N(interruptible), and F(loating) point arithmetic used (if
supported by the platform). If the corresponding
status bit is off, @code{.} is printed instead. If @code{l} option is
-specified, more detail information is printed for each thread.
+specified, more detail information is printed for each thread. If the
+@code{s} option is given, scheduling information is displayed.
+
+@item show all tasks
+Displays all tasks similar to @code{show all threads}, but omits
+information about the individual threads.
@item show task [ @var{addr} ]
Display the information of a task specified by @var{addr}. If
@@ -7120,6 +7126,14 @@ If you want to clear a watch point in user space, specify @code{T} and
parameter is omitted, a task of the default target thread or a current
task is assumed. If you specify a wrong space address, the request is
rejected with an error message.
+
+@item debug traps /on|/off
+Enables or disables debugging of all traps with @code{ddb}.
+
+@item debug references /on|/off
+Enables or disables debugging of all port reference counting errors
+with @code{ddb}.
+
@end table
@@ -7142,6 +7156,11 @@ Task or thread structure address. @var{xx} and @var{yy} are task and
thread identification numbers printed by a @code{show all threads}
command respectively. This variable is read only.
+@item map@var{xx}
+VM map structure address. @var{xx} is a task identification number
+printed by a @code{show all tasks} command. This variable is read
+only.
+
@item thread
The default target thread. The value is used when @code{t} option is
specified without explicit thread structure address parameter in command
diff --git a/i386/i386/db_interface.c b/i386/i386/db_interface.c
index b3fac0bb..aac29395 100644
--- a/i386/i386/db_interface.c
+++ b/i386/i386/db_interface.c
@@ -446,8 +446,6 @@ db_user_to_kernel_address(
}
if (flag) {
db_printf("\nno memory is assigned to address %08x\n", addr);
- db_error(0);
- /* NOTREACHED */
}
return(-1);
}
@@ -459,7 +457,7 @@ db_user_to_kernel_address(
* Read bytes from kernel address space for debugger.
*/
-void
+boolean_t
db_read_bytes(
vm_offset_t addr,
int size,
@@ -477,17 +475,16 @@ db_read_bytes(
while (--size >= 0) {
if (addr < VM_MIN_KERNEL_ADDRESS && task == TASK_NULL) {
db_printf("\nbad address %x\n", addr);
- db_error(0);
- /* NOTREACHED */
+ return FALSE;
}
addr++;
*data++ = *src++;
}
- return;
+ return TRUE;
}
while (size > 0) {
if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0)
- return;
+ return FALSE;
src = (char *)kern_addr;
n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
if (n > size)
@@ -497,6 +494,7 @@ db_read_bytes(
while (--n >= 0)
*data++ = *src++;
}
+ return TRUE;
}
/*
diff --git a/i386/i386/db_interface.h b/i386/i386/db_interface.h
index 8d7daeae..18ee3291 100644
--- a/i386/i386/db_interface.h
+++ b/i386/i386/db_interface.h
@@ -32,7 +32,7 @@ extern boolean_t kdb_trap (
int code,
struct i386_saved_state *regs);
-extern void db_read_bytes (
+extern boolean_t db_read_bytes (
vm_offset_t addr,
int size,
char *data,
@@ -129,4 +129,6 @@ db_write_bytes_user_space(
char *data,
task_t task);
+void db_debug_all_traps (boolean_t enable);
+
#endif /* _I386_DB_INTERFACE_H_ */
diff --git a/i386/i386/io_perm.c b/i386/i386/io_perm.c
index 3224fdd3..c966102c 100644
--- a/i386/i386/io_perm.c
+++ b/i386/i386/io_perm.c
@@ -67,10 +67,22 @@
#include "io_perm.h"
#include "gdt.h"
#include "pcb.h"
+
+#define PCI_CFG1_START 0xcf8
+#define PCI_CFG1_END 0xcff
+#define PCI_CFG2_START 0xc000
+#define PCI_CFG2_END 0xcfff
+
+#define CONTAINS_PCI_CFG(from, to) \
+ ( ( ( from <= PCI_CFG1_END ) && ( to >= PCI_CFG1_START ) ) || \
+ ( ( from <= PCI_CFG2_END ) && ( to >= PCI_CFG2_START ) ) )
+
/* Our device emulation ops. See below, at the bottom of this file. */
static struct device_emulation_ops io_perm_device_emulation_ops;
+/* Flag to hold PCI io cfg access lock */
+static boolean_t taken_pci_cfg = FALSE;
/* The outtran which allows MIG to convert an io_perm_t object to a port
representing it. */
@@ -107,17 +119,15 @@ convert_port_to_io_perm (ipc_port_t port)
return io_perm;
}
-#if TODO_REMOVE_ME
-/* TODO. Fix this comment. */
/* The destructor which is called when the last send right to a port
representing an io_perm_t object vanishes. */
void
io_perm_deallocate (io_perm_t io_perm)
{
- /* TODO. Is there anything to deallocate in here? I don't think so, as we
- don't allocate anything in `convert_port_to_io_perm'. */
+ /* We need to check if the io_perm was a PCI cfg one and release it */
+ if (CONTAINS_PCI_CFG(io_perm->from, io_perm->to))
+ taken_pci_cfg = FALSE;
}
-#endif
/* Our ``no senders'' handling routine. Deallocate the object. */
static
@@ -185,6 +195,10 @@ i386_io_perm_create (const ipc_port_t master_port, io_port_t from, io_port_t to,
if (from > to)
return KERN_INVALID_ARGUMENT;
+ /* Only one process may take a range that includes PCI cfg registers */
+ if (taken_pci_cfg && CONTAINS_PCI_CFG(from, to))
+ return KERN_PROTECTION_FAILURE;
+
io_perm_t io_perm;
io_perm = (io_perm_t) kalloc (sizeof *io_perm);
@@ -216,6 +230,9 @@ i386_io_perm_create (const ipc_port_t master_port, io_port_t from, io_port_t to,
*new = io_perm;
+ if (CONTAINS_PCI_CFG(from, to))
+ taken_pci_cfg = TRUE;
+
return KERN_SUCCESS;
}
diff --git a/i386/i386/io_perm.h b/i386/i386/io_perm.h
index a7f1f6fe..b97cf973 100644
--- a/i386/i386/io_perm.h
+++ b/i386/i386/io_perm.h
@@ -58,8 +58,6 @@ typedef struct io_perm *io_perm_t;
extern io_perm_t convert_port_to_io_perm (ipc_port_t);
extern ipc_port_t convert_io_perm_to_port (io_perm_t);
-#if TODO_REMOVE_ME
extern void io_perm_deallocate (io_perm_t);
-#endif
#endif /* _I386_IO_PERM_H_ */
diff --git a/i386/i386/ipl.h b/i386/i386/ipl.h
index 2da2e89f..fb939789 100644
--- a/i386/i386/ipl.h
+++ b/i386/i386/ipl.h
@@ -74,7 +74,6 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <machine/machspl.h>
extern void (*ivect[])();
extern int iunit[];
-extern int intpri[];
extern spl_t curr_ipl;
#endif /* __ASSEMBLER__ */
#endif /* KERNEL */
diff --git a/i386/i386/lock.h b/i386/i386/lock.h
index b989927b..8efa0ca0 100644
--- a/i386/i386/lock.h
+++ b/i386/i386/lock.h
@@ -47,7 +47,7 @@
({ int _old_val_; \
asm volatile("xchgl %0, %2" \
: "=r" (_old_val_) \
- : "0" (new_val), "m" (*(lock) : "memory") \
+ : "0" (new_val), "m" (*(lock)) : "memory" \
); \
_old_val_; \
})
diff --git a/i386/i386/mp_desc.h b/i386/i386/mp_desc.h
index 9f963123..4a9cbdf6 100644
--- a/i386/i386/mp_desc.h
+++ b/i386/i386/mp_desc.h
@@ -77,6 +77,9 @@ extern struct real_descriptor *mp_gdt[NCPUS];
extern struct mp_desc_table * mp_desc_init(int);
+extern void interrupt_processor(int cpu);
+
+
#endif /* MULTIPROCESSOR */
extern void start_other_cpus(void);
diff --git a/i386/i386/pcb.h b/i386/i386/pcb.h
index cf476942..30d96271 100644
--- a/i386/i386/pcb.h
+++ b/i386/i386/pcb.h
@@ -30,6 +30,7 @@
#include <mach/exec/exec.h>
#include <mach/thread_status.h>
#include <machine/thread.h>
+#include <machine/io_perm.h>
extern void pcb_init (task_t parent_task, thread_t thread);
diff --git a/i386/i386/pic.c b/i386/i386/pic.c
index e8c881af..0feebc6f 100644
--- a/i386/i386/pic.c
+++ b/i386/i386/pic.c
@@ -57,7 +57,6 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <i386/pio.h>
spl_t curr_ipl;
-int pic_mask[NSPL];
int curr_pic_mask;
int iunit[NINTR] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
@@ -72,33 +71,18 @@ u_short PICM_ICW4, PICS_ICW4 ;
/*
** picinit() - This routine
** * Establishes a table of interrupt vectors
-** * Establishes a table of interrupt priority levels
-** * Establishes a table of interrupt masks to be put
-** in the PICs.
** * Establishes location of PICs in the system
+** * Unmasks all interrupts in the PICs
** * Initialises them
**
** At this stage the interrupt functionality of this system should be
-** coplete.
-**
+** complete.
*/
-
/*
-** 1. First we form a table of PIC masks - rather then calling form_pic_mask()
-** each time there is a change of interrupt level - we will form a table
-** of pic masks, as there are only 7 interrupt priority levels.
-**
-** 2. The next thing we must do is to determine which of the PIC interrupt
-** request lines have to be masked out, this is done by calling
-** form_pic_mask() with a (int_lev) of zero, this will find all the
-** interrupt lines that have priority 0, (ie to be ignored).
-** Then we split this up for the master/slave PICs.
-**
-** 2. Initialise the PICs , master first, then the slave.
-** All the register field definitions are described in pic_jh.h, also
-** the settings of these fields for the various registers are selected.
-**
+** Initialise the PICs , master first, then the slave.
+** All the register field definitions are described in pic.h also
+** the settings of these fields for the various registers are selected.
*/
void
@@ -108,23 +92,14 @@ picinit(void)
asm("cli");
/*
- ** 1. Form pic mask table
- */
-#if 0
- printf (" Let the console driver screw up this line ! \n");
-#endif
-
- form_pic_mask();
-
- /*
- ** 1a. Select current SPL.
+ ** 0. Initialise the current level to match cli()
*/
curr_ipl = SPLHI;
- curr_pic_mask = pic_mask[SPLHI];
+ curr_pic_mask = 0;
/*
- ** 2. Generate addresses to each PIC port.
+ ** 1. Generate addresses to each PIC port.
*/
master_icw = PIC_MASTER_ICW;
@@ -133,7 +108,7 @@ picinit(void)
slaves_ocw = PIC_SLAVE_OCW;
/*
- ** 3. Select options for each ICW and each OCW for each PIC.
+ ** 2. Select options for each ICW and each OCW for each PIC.
*/
PICM_ICW1 =
@@ -164,9 +139,8 @@ picinit(void)
PICM_OCW3 = (OCW_TEMPLATE | READ_NEXT_RD | READ_IR_ONRD );
PICS_OCW3 = (OCW_TEMPLATE | READ_NEXT_RD | READ_IR_ONRD );
-
/*
- ** 4. Initialise master - send commands to master PIC
+ ** 3. Initialise master - send commands to master PIC
*/
outb ( master_icw, PICM_ICW1 );
@@ -178,7 +152,7 @@ picinit(void)
outb ( master_icw, PICM_OCW3 );
/*
- ** 5. Initialise slave - send commands to slave PIC
+ ** 4. Initialise slave - send commands to slave PIC
*/
outb ( slaves_icw, PICS_ICW1 );
@@ -191,61 +165,23 @@ picinit(void)
outb ( slaves_icw, PICS_OCW3 );
/*
- ** 6. Initialise interrupts
+ ** 5. Initialise interrupts
*/
outb ( master_ocw, PICM_OCW1 );
-#if 0
- printf(" spl set to %x \n", curr_pic_mask);
-#endif
-
-}
-
-
-/*
-** form_pic_mask(int_lvl)
-**
-** For a given interrupt priority level (int_lvl), this routine goes out
-** and scans through the interrupt level table, and forms a mask based on the
-** entries it finds there that have the same or lower interrupt priority level
-** as (int_lvl). It returns a 16-bit mask which will have to be split up between
-** the 2 pics.
-**
-*/
-
-#if defined(AT386)
-#define SLAVEMASK (0xFFFF ^ SLAVE_ON_IR2)
-#endif /* defined(AT386) */
-
-#define SLAVEACTV 0xFF00
-
-void
-form_pic_mask(void)
-{
- unsigned int i, j, bit, mask;
-
- for (i=SPL0; i < NSPL; i++) {
- for (j=0x00, bit=0x01, mask = 0; j < NINTR; j++, bit<<=1)
- if (intpri[j] <= i)
- mask |= bit;
-
- if ((mask & SLAVEACTV) != SLAVEACTV )
- mask &= SLAVEMASK;
-
- pic_mask[i] = mask;
- }
}
void
intnull(int unit_dev)
{
- printf("intnull(%d)\n", unit_dev);
-}
-
-int prtnull_count = 0;
+ static char warned[NINTR];
+
+ if (unit_dev >= NINTR)
+ printf("Unknown interrupt %d\n", unit_dev);
+ else if (!warned[unit_dev])
+ {
+ printf("intnull(%d)\n", unit_dev);
+ warned[unit_dev] = 1;
+ }
-void
-prtnull(int unit)
-{
- ++prtnull_count;
}
diff --git a/i386/i386/pic.h b/i386/i386/pic.h
index 80bf65d6..553c4bcc 100644
--- a/i386/i386/pic.h
+++ b/i386/i386/pic.h
@@ -145,13 +145,13 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define NON_SPEC_EOI 0x20
-#define SPECIFIC_EOI 0x30
-#define ROT_NON_SPEC 0x50
-#define SET_ROT_AEOI 0x40
+#define SPECIFIC_EOI 0x60
+#define ROT_NON_SPEC 0xA0
+#define SET_ROT_AEOI 0x80
#define RSET_ROTAEOI 0x00
-#define ROT_SPEC_EOI 0x70
-#define SET_PRIORITY 0x60
-#define NO_OPERATION 0x20
+#define ROT_SPEC_EOI 0xE0
+#define SET_PRIORITY 0xC0
+#define NO_OPERATION 0x40
#define SEND_EOI_IR0 0x00
#define SEND_EOI_IR1 0x01
@@ -177,11 +177,8 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#define READ_IS_ONRD 0x01
#ifndef __ASSEMBLER__
-extern void form_pic_mask (void);
extern void picinit (void);
extern int curr_pic_mask;
-extern int pic_mask[];
-extern void prtnull(int unit);
extern void intnull(int unit);
#endif /* __ASSEMBLER__ */
diff --git a/i386/i386/pit.c b/i386/i386/pit.c
index da683308..4e3feeec 100644
--- a/i386/i386/pit.c
+++ b/i386/i386/pit.c
@@ -54,6 +54,7 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <i386/pic.h>
#include <i386/pit.h>
#include <i386/pio.h>
+#include <kern/cpu_number.h>
int pitctl_port = PITCTL_PORT; /* For 386/20 Board */
int pitctr0_port = PITCTR0_PORT; /* For 386/20 Board */
@@ -70,8 +71,9 @@ clkstart(void)
unsigned char byte;
unsigned long s;
- intpri[0] = SPLHI;
- form_pic_mask();
+ if (cpu_number() != 0)
+ /* Only one PIT initialization is needed */
+ return;
s = sploff(); /* disable interrupts */
diff --git a/i386/i386/sched_param.h b/i386/i386/sched_param.h
index e7e855f4..c93ed8a2 100644
--- a/i386/i386/sched_param.h
+++ b/i386/i386/sched_param.h
@@ -31,10 +31,10 @@
#define _I386_SCHED_PARAM_H_
/*
- * Sequent requires a right shift of 18 bits to convert
+ * Sequent requires a right shift of 17 bits to convert
* microseconds to priorities.
*/
-#define PRI_SHIFT 18
+#define PRI_SHIFT 17
#endif /* _I386_SCHED_PARAM_H_ */
diff --git a/i386/i386/spl.S b/i386/i386/spl.S
index 307739f5..215142c9 100644
--- a/i386/i386/spl.S
+++ b/i386/i386/spl.S
@@ -19,7 +19,6 @@
#include <mach/machine/asm.h>
#include <i386/ipl.h>
-#include <i386/pic.h>
#include <i386/i386asm.h>
#include <i386/xen.h>
@@ -30,39 +29,9 @@
#endif
/*
- * Set IPL to the specified value.
- *
- * NOTE: Normally we would not have to enable interrupts
- * here. Linux drivers, however, use cli()/sti(), so we must
- * guard against the case where a Mach routine which
- * has done an spl() calls a Linux routine that returns
- * with interrupts disabled. A subsequent splx() can,
- * potentially, return with interrupts disabled.
+ * Program XEN evt masks from %eax.
*/
-#define SETIPL(level) \
- mb; \
- movl $(level),%edx; \
- cmpl EXT(curr_ipl),%edx; \
- jne spl; \
- sti; \
- movl %edx,%eax; \
- ret
-
-/*
- * Program PICs with mask in %eax.
- */
-#ifndef MACH_XEN
-#define SETMASK() \
- cmpl EXT(curr_pic_mask),%eax; \
- je 9f; \
- outb %al,$(PIC_MASTER_OCW); \
- movl %eax,EXT(curr_pic_mask); \
- movb %ah,%al; \
- outb %al,$(PIC_SLAVE_OCW); \
-9:
-#else /* MACH_XEN */
-#define pic_mask int_mask
-#define SETMASK() \
+#define XEN_SETMASK() \
pushl %ebx; \
movl %eax,%ebx; \
xchgl %eax,hyp_shared_info+EVTMASK; \
@@ -74,7 +43,6 @@
lock orl $1,hyp_shared_info+CPU_PENDING_SEL; /* Yes, activate it */ \
movb $1,hyp_shared_info+CPU_PENDING; \
9:
-#endif /* MACH_XEN */
ENTRY(spl0)
mb;
@@ -109,50 +77,50 @@ ENTRY(spl0)
cmpl $(SPL0),EXT(curr_ipl) /* are we at spl0? */
je 1f /* yes, all done */
movl $(SPL0),EXT(curr_ipl) /* set ipl */
- movl EXT(pic_mask)+SPL0*4,%eax
- /* get PIC mask */
- SETMASK() /* program PICs with new mask */
+#ifdef MACH_XEN
+ movl EXT(int_mask)+SPL0*4,%eax
+ /* get xen mask */
+ XEN_SETMASK() /* program xen evts */
+#endif
1:
sti /* enable interrupts */
popl %eax /* return previous mask */
ret
+
+/*
+ * Historically, SETIPL(level) was called
+ * for spl levels 1-6, now we have combined
+ * all the intermediate levels into the highest level
+ * such that interrupts are either on or off,
+ * since modern hardware can handle it.
+ * This simplifies the interrupt handling
+ * especially for the linux drivers.
+ */
Entry(splsoftclock)
ENTRY(spl1)
- SETIPL(SPL1)
-
ENTRY(spl2)
- SETIPL(SPL2)
-
ENTRY(spl3)
- SETIPL(SPL3)
-
Entry(splnet)
Entry(splhdw)
ENTRY(spl4)
- SETIPL(SPL4)
-
Entry(splbio)
Entry(spldcm)
ENTRY(spl5)
- SETIPL(SPL5)
-
Entry(spltty)
Entry(splimp)
Entry(splvm)
ENTRY(spl6)
- SETIPL(SPL6)
-
Entry(splclock)
Entry(splsched)
Entry(splhigh)
Entry(splhi)
ENTRY(spl7)
mb;
- /* ipl7 just clears IF */
- movl $SPL7,%eax
- xchgl EXT(curr_ipl),%eax
+ /* just clear IF */
cli
+ movl $SPL7,%eax
+ xchgl EXT(curr_ipl),%eax
ret
ENTRY(splx)
@@ -223,9 +191,11 @@ splx_cli:
cmpl EXT(curr_ipl),%edx /* same ipl as current? */
je 1f /* yes, all done */
movl %edx,EXT(curr_ipl) /* set ipl */
- movl EXT(pic_mask)(,%edx,4),%eax
- /* get PIC mask */
- SETMASK() /* program PICs with new mask */
+#ifdef MACH_XEN
+ movl EXT(int_mask)(,%edx,4),%eax
+ /* get int mask */
+ XEN_SETMASK() /* program xen evts with new mask */
+#endif
1:
ret
@@ -250,11 +220,15 @@ spl:
#endif /* (MACH_KDB || MACH_TTD) && !MACH_XEN */
cmpl $SPL7,%edx /* spl7? */
je EXT(spl7) /* yes, handle specially */
- movl EXT(pic_mask)(,%edx,4),%eax
- /* get PIC mask */
+#ifdef MACH_XEN
+ movl EXT(int_mask)(,%edx,4),%eax
+ /* get int mask */
+#endif
cli /* disable interrupts */
xchgl EXT(curr_ipl),%edx /* set ipl */
- SETMASK() /* program PICs with new mask */
+#ifdef MACH_XEN
+	XEN_SETMASK()			/* program xen evts with new mask */
+#endif
sti /* enable interrupts */
movl %edx,%eax /* return previous ipl */
ret
diff --git a/i386/i386/spl.h b/i386/i386/spl.h
index 00c51458..173629fe 100644
--- a/i386/i386/spl.h
+++ b/i386/i386/spl.h
@@ -54,6 +54,7 @@ extern spl_t (spldcm)(void);
extern spl_t (spl6)(void);
extern spl_t (spltty)(void);
extern spl_t (splimp)(void);
+extern spl_t (splvm)(void);
extern spl_t (spl7)(void);
extern spl_t (splclock)(void);
diff --git a/i386/i386/thread.h b/i386/i386/thread.h
index 9bda11f5..bae61e31 100644
--- a/i386/i386/thread.h
+++ b/i386/i386/thread.h
@@ -40,7 +40,6 @@
#include <kern/lock.h>
-#include <i386/tss.h>
#include "gdt.h"
/*
diff --git a/i386/i386/trap.c b/i386/i386/trap.c
index d4bdc7f2..d3f61314 100644
--- a/i386/i386/trap.c
+++ b/i386/i386/trap.c
@@ -626,3 +626,13 @@ interrupted_pc(t)
return iss->eip;
}
#endif /* MACH_PCSAMPLE > 0 */
+
+#if MACH_KDB
+
+void
+db_debug_all_traps (boolean_t enable)
+{
+ debug_all_traps_with_kdb = enable;
+}
+
+#endif /* MACH_KDB */
diff --git a/i386/i386at/autoconf.c b/i386/i386at/autoconf.c
index 908c3ec0..151e3fd2 100644
--- a/i386/i386at/autoconf.c
+++ b/i386/i386at/autoconf.c
@@ -126,13 +126,11 @@ void take_dev_irq(
{
int pic = (int)dev->sysdep1;
- if (intpri[pic] == 0) {
+ if (ivect[pic] == intnull) {
iunit[pic] = dev->unit;
ivect[pic] = dev->intr;
- intpri[pic] = (int)dev->sysdep;
- form_pic_mask();
} else {
- printf("The device below will clobber IRQ %d.\n", pic);
+ printf("The device below will clobber IRQ %d (%p).\n", pic, ivect[pic]);
printf("You have two devices at the same IRQ.\n");
printf("This won't work. Reconfigure your hardware and try again.\n");
printf("%s%d: port = %lx, spl = %ld, pic = %d.\n",
@@ -147,13 +145,11 @@ void take_ctlr_irq(
const struct bus_ctlr *ctlr)
{
int pic = ctlr->sysdep1;
- if (intpri[pic] == 0) {
+ if (ivect[pic] == intnull) {
iunit[pic] = ctlr->unit;
ivect[pic] = ctlr->intr;
- intpri[pic] = (int)ctlr->sysdep;
- form_pic_mask();
} else {
- printf("The device below will clobber IRQ %d.\n", pic);
+ printf("The device below will clobber IRQ %d (%p).\n", pic, ivect[pic]);
printf("You have two devices at the same IRQ. This won't work.\n");
printf("Reconfigure your hardware and try again.\n");
while (1);
diff --git a/i386/i386at/com.c b/i386/i386at/com.c
index 84891bd2..4689e30d 100644
--- a/i386/i386at/com.c
+++ b/i386/i386at/com.c
@@ -443,9 +443,9 @@ mach_port_t port;
io_return_t
comgetstat(dev, flavor, data, count)
dev_t dev;
-int flavor;
-int *data; /* pointer to OUT array */
-natural_t *count; /* out */
+dev_flavor_t flavor;
+dev_status_t data; /* pointer to OUT array */
+mach_msg_type_number_t *count; /* out */
{
io_return_t result = D_SUCCESS;
int unit = minor(dev);
@@ -466,9 +466,9 @@ natural_t *count; /* out */
io_return_t
comsetstat(
dev_t dev,
- int flavor,
- int * data,
- natural_t count)
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count)
{
io_return_t result = D_SUCCESS;
int unit = minor(dev);
diff --git a/i386/i386at/com.h b/i386/i386at/com.h
index 779cdba8..a415488c 100644
--- a/i386/i386at/com.h
+++ b/i386/i386at/com.h
@@ -60,16 +60,16 @@ void comattach(struct bus_device *dev);
extern io_return_t
comgetstat(
dev_t dev,
- int flavor,
- int *data,
- natural_t *count);
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t *count);
extern io_return_t
comsetstat(
dev_t dev,
- int flavor,
- int *data,
- natural_t count);
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count);
#if MACH_KDB
extern void kdb_kintr(void);
diff --git a/i386/i386at/interrupt.S b/i386/i386at/interrupt.S
index cdb385c6..23a2e582 100644
--- a/i386/i386at/interrupt.S
+++ b/i386/i386at/interrupt.S
@@ -30,8 +30,7 @@ ENTRY(interrupt)
pushl %eax /* save irq number */
movl %eax,%ecx /* copy irq number */
shll $2,%ecx /* irq * 4 */
- movl EXT(intpri)(%ecx),%edx /* get new ipl */
- call spl /* set ipl */
+ call spl7 /* set ipl */
movl EXT(iunit)(%ecx),%edx /* get device unit number */
pushl %eax /* push previous ipl */
pushl %edx /* push unit number */
@@ -39,14 +38,44 @@ ENTRY(interrupt)
addl $4,%esp /* pop unit number */
call splx_cli /* restore previous ipl */
addl $4,%esp /* pop previous ipl */
+
cli /* XXX no more nested interrupts */
- popl %eax /* restore irq number */
- movl %eax,%ecx /* copy irq number */
- movb $(NON_SPEC_EOI),%al /* non-specific EOI */
- outb %al,$(PIC_MASTER_ICW) /* ack interrupt to master */
+ popl %ecx /* restore irq number */
+
+ movl $1,%eax
+ shll %cl,%eax /* get corresponding IRQ mask */
+ orl EXT(curr_pic_mask),%eax /* add current mask */
+
cmpl $8,%ecx /* do we need to ack slave? */
- jl 1f /* no, skip it */
- outb %al,$(PIC_SLAVE_ICW)
+ jl 1f /* no, only master */
+
+ /* EOI on slave */
+ movb %ah,%al
+ outb %al,$(PIC_SLAVE_OCW) /* mask slave out */
+
+ movb $(SPECIFIC_EOI),%al /* specific EOI for this irq */
+ andb $7,%cl /* irq number for the slave */
+ orb %cl,%al /* combine them */
+ outb %al,$(PIC_SLAVE_ICW) /* ack interrupt to slave */
+
+ movb $(SPECIFIC_EOI + I_AM_SLAVE_2),%al /* specific master EOI for cascaded slave */
+ outb %al,$(PIC_MASTER_ICW) /* ack interrupt to master */
+
+ movl EXT(curr_pic_mask),%eax /* restore original mask */
+ movb %ah,%al
+ outb %al,$(PIC_SLAVE_OCW) /* unmask slave */
+ jmp 2f
+
1:
- ret /* return */
+ /* EOI on master */
+ outb %al,$(PIC_MASTER_OCW) /* mask master out */
+
+ movb $(SPECIFIC_EOI),%al /* specific EOI for this irq */
+ orb %cl,%al /* combine with irq number */
+ outb %al,$(PIC_MASTER_ICW) /* ack interrupt to master */
+
+ movl EXT(curr_pic_mask),%eax /* restore original mask */
+ outb %al,$(PIC_MASTER_OCW) /* unmask master */
+2:
+ ret
END(interrupt)
diff --git a/i386/i386at/kd.c b/i386/i386at/kd.c
index 9ed3958a..8e9222a0 100644
--- a/i386/i386at/kd.c
+++ b/i386/i386at/kd.c
@@ -545,7 +545,7 @@ io_req_t uio;
*/
/*ARGSUSED*/
-int
+vm_offset_t
kdmmap(dev, off, prot)
dev_t dev;
vm_offset_t off;
@@ -569,9 +569,9 @@ kdportdeath(
/*ARGSUSED*/
io_return_t kdgetstat(
dev_t dev,
- int flavor,
- int * data, /* pointer to OUT array */
- natural_t *count) /* OUT */
+ dev_flavor_t flavor,
+ dev_status_t data, /* pointer to OUT array */
+ mach_msg_type_number_t *count) /* OUT */
{
io_return_t result;
@@ -599,9 +599,9 @@ io_return_t kdgetstat(
/*ARGSUSED*/
io_return_t kdsetstat(
dev_t dev,
- int flavor,
- int * data,
- natural_t count)
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count)
{
io_return_t result;
diff --git a/i386/i386at/kd.h b/i386/i386at/kd.h
index 60cee7e3..6f425ae9 100644
--- a/i386/i386at/kd.h
+++ b/i386/i386at/kd.h
@@ -760,18 +760,18 @@ extern int kdwrite(dev_t dev, io_req_t uio);
extern io_return_t kdgetstat(
dev_t dev,
- int flavor,
- int *data,
- natural_t *count);
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t *count);
extern io_return_t kdsetstat(
dev_t dev,
- int flavor,
- int * data,
- natural_t count);
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count);
extern int kdportdeath(dev_t dev, mach_port_t port);
-extern int kdmmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
+extern vm_offset_t kdmmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
boolean_t kdcheckmagic(Scancode scancode);
diff --git a/i386/i386at/kd_event.c b/i386/i386at/kd_event.c
index 694c165e..bed9240f 100644
--- a/i386/i386at/kd_event.c
+++ b/i386/i386at/kd_event.c
@@ -145,9 +145,9 @@ kbdclose(
io_return_t kbdgetstat(
dev_t dev,
- int flavor,
- int * data, /* pointer to OUT array */
- unsigned int *count) /* OUT */
+ dev_flavor_t flavor,
+ dev_status_t data, /* pointer to OUT array */
+ mach_msg_type_number_t *count) /* OUT */
{
switch (flavor) {
case KDGKBDTYPE:
@@ -167,9 +167,9 @@ io_return_t kbdgetstat(
io_return_t kbdsetstat(
dev_t dev,
- int flavor,
- int * data,
- unsigned int count)
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count)
{
switch (flavor) {
case KDSKBDMODE:
diff --git a/i386/i386at/kd_event.h b/i386/i386at/kd_event.h
index 8b2d6642..7e66f762 100644
--- a/i386/i386at/kd_event.h
+++ b/i386/i386at/kd_event.h
@@ -40,15 +40,15 @@ extern int kbdread(dev_t dev, io_req_t ior);
extern io_return_t kbdgetstat(
dev_t dev,
- int flavor,
- int *data,
- unsigned int *count);
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t *count);
extern io_return_t kbdsetstat(
dev_t dev,
- int flavor,
- int *data,
- unsigned int count);
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count);
extern void kd_enqsc(Scancode sc);
diff --git a/i386/i386at/kd_mouse.c b/i386/i386at/kd_mouse.c
index ece13efe..2995587c 100644
--- a/i386/i386at/kd_mouse.c
+++ b/i386/i386at/kd_mouse.c
@@ -84,7 +84,6 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
static void (*oldvect)(); /* old interrupt vector */
static int oldunit;
-static spl_t oldspl;
extern struct bus_device *cominfo[];
kd_event_queue mouse_queue; /* queue of mouse events */
@@ -226,9 +225,6 @@ kd_mouse_open(
oldvect = ivect[mouse_pic];
ivect[mouse_pic] = kdintr;
- oldspl = intpri[mouse_pic];
- intpri[mouse_pic] = SPL6;
- form_pic_mask();
splx(s);
}
@@ -290,16 +286,14 @@ kd_mouse_close(
spl_t s = splhi();
ivect[mouse_pic] = oldvect;
- intpri[mouse_pic] = oldspl;
- form_pic_mask();
splx(s);
}
io_return_t mousegetstat(
dev_t dev,
- int flavor,
- int * data, /* pointer to OUT array */
- unsigned int *count) /* OUT */
+ dev_flavor_t flavor,
+ dev_status_t data, /* pointer to OUT array */
+ mach_msg_type_number_t *count) /* OUT */
{
switch (flavor) {
case DEV_GET_SIZE:
diff --git a/i386/i386at/kd_mouse.h b/i386/i386at/kd_mouse.h
index a8a72a3b..2d813c4a 100644
--- a/i386/i386at/kd_mouse.h
+++ b/i386/i386at/kd_mouse.h
@@ -60,9 +60,9 @@ extern int mouseread(dev_t dev, io_req_t ior);
extern io_return_t mousegetstat(
dev_t dev,
- int flavor,
- int *data,
- unsigned int *count);
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t *count);
void mouseintr(int unit);
boolean_t mouse_read_done(io_req_t ior);
diff --git a/i386/i386at/lpr.c b/i386/i386at/lpr.c
index 8303be59..49cb1387 100644
--- a/i386/i386at/lpr.c
+++ b/i386/i386at/lpr.c
@@ -122,7 +122,7 @@ lpropen(dev_t dev, int flag, io_req_t ior)
tp = &lpr_tty[unit];
addr = (u_short) isai->address;
tp->t_dev = dev;
- tp->t_addr = addr;
+ tp->t_addr = (void*) (natural_t) addr;
tp->t_oproc = lprstart;
tp->t_state |= TS_WOPEN;
tp->t_stop = lprstop;
@@ -178,9 +178,9 @@ mach_port_t port;
io_return_t
lprgetstat(dev, flavor, data, count)
dev_t dev;
-int flavor;
-int *data; /* pointer to OUT array */
-natural_t *count; /* out */
+dev_flavor_t flavor;
+dev_status_t data; /* pointer to OUT array */
+mach_msg_type_number_t *count; /* out */
{
io_return_t result = D_SUCCESS;
int unit = minor(dev);
@@ -196,9 +196,9 @@ natural_t *count; /* out */
io_return_t
lprsetstat(
dev_t dev,
- int flavor,
- int * data,
- natural_t count)
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count)
{
io_return_t result = D_SUCCESS;
int unit = minor(dev);
diff --git a/i386/i386at/lpr.h b/i386/i386at/lpr.h
index 269fd643..cab30166 100644
--- a/i386/i386at/lpr.h
+++ b/i386/i386at/lpr.h
@@ -44,16 +44,16 @@ void lprattach(struct bus_device *dev);
extern io_return_t
lprgetstat(
dev_t dev,
- int flavor,
- int *data,
- natural_t *count);
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t *count);
extern io_return_t
lprsetstat(
dev_t dev,
- int flavor,
- int *data,
- natural_t count);
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count);
void lprpr_addr(unsigned short addr);
diff --git a/i386/i386at/mem.c b/i386/i386at/mem.c
index eac2549f..61143185 100644
--- a/i386/i386at/mem.c
+++ b/i386/i386at/mem.c
@@ -30,7 +30,7 @@
/* This provides access to any memory that is not main RAM */
/*ARGSUSED*/
-int
+vm_offset_t
memmmap(dev, off, prot)
dev_t dev;
vm_offset_t off;
diff --git a/i386/i386at/mem.h b/i386/i386at/mem.h
index 0bc85ea4..a5b4aefe 100644
--- a/i386/i386at/mem.h
+++ b/i386/i386at/mem.h
@@ -19,6 +19,6 @@
#ifndef _MEM_H_
#define _MEM_H_
-extern int memmmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
+extern vm_offset_t memmmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
#endif /* _MEM_H_ */
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index a7abf5ce..04660a6d 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -244,7 +244,8 @@ void halt_all_cpus(boolean_t reboot)
#ifdef MACH_HYP
hyp_halt();
#endif /* MACH_HYP */
- printf("In tight loop: hit ctl-alt-del to reboot\n");
+ printf("Shutdown completed successfully, now in tight loop.\n");
+ printf("You can safely power off the system or hit ctl-alt-del to reboot\n");
(void) spl0();
}
while (TRUE)
@@ -288,7 +289,7 @@ register_boot_data(const struct multiboot_raw_info *mbi)
+ strlen((void *)phystokv(mbi->cmdline)) + 1, TRUE);
}
- if (mbi->flags & MULTIBOOT_LOADER_MODULES) {
+ if (mbi->flags & MULTIBOOT_LOADER_MODULES && mbi->mods_count) {
i = mbi->mods_count * sizeof(struct multiboot_raw_module);
biosmem_register_boot_data(mbi->mods_addr, mbi->mods_addr + i, TRUE);
@@ -296,7 +297,8 @@ register_boot_data(const struct multiboot_raw_info *mbi)
for (i = 0; i < mbi->mods_count; i++) {
mod = (struct multiboot_raw_module *)tmp + i;
- biosmem_register_boot_data(mod->mod_start, mod->mod_end, TRUE);
+ if (mod->mod_end != mod->mod_start)
+ biosmem_register_boot_data(mod->mod_start, mod->mod_end, TRUE);
if (mod->string != 0) {
biosmem_register_boot_data(mod->string,
@@ -309,7 +311,8 @@ register_boot_data(const struct multiboot_raw_info *mbi)
if (mbi->flags & MULTIBOOT_LOADER_SHDR) {
tmp = mbi->shdr_num * mbi->shdr_size;
- biosmem_register_boot_data(mbi->shdr_addr, mbi->shdr_addr + tmp, FALSE);
+ if (tmp != 0)
+ biosmem_register_boot_data(mbi->shdr_addr, mbi->shdr_addr + tmp, FALSE);
tmp = phystokv(mbi->shdr_addr);
@@ -320,7 +323,8 @@ register_boot_data(const struct multiboot_raw_info *mbi)
&& (shdr->type != ELF_SHT_STRTAB))
continue;
- biosmem_register_boot_data(shdr->addr, shdr->addr + shdr->size, FALSE);
+ if (shdr->size != 0)
+ biosmem_register_boot_data(shdr->addr, shdr->addr + shdr->size, FALSE);
}
}
}
@@ -373,7 +377,7 @@ i386at_init(void)
boot_info.cmdline = addr;
}
- if (boot_info.flags & MULTIBOOT_MODS) {
+ if (boot_info.flags & MULTIBOOT_MODS && boot_info.mods_count) {
struct multiboot_module *m;
int i;
@@ -642,7 +646,7 @@ void c_boot_entry(vm_offset_t bi)
#include <vm/pmap.h>
#include <mach/time_value.h>
-int
+vm_offset_t
timemmap(dev, off, prot)
dev_t dev;
vm_offset_t off;
diff --git a/i386/i386at/model_dep.h b/i386/i386at/model_dep.h
index 47551b85..d47378a1 100644
--- a/i386/i386at/model_dep.h
+++ b/i386/i386at/model_dep.h
@@ -30,7 +30,7 @@ extern vm_offset_t int_stack_top, int_stack_base;
/* Check whether P points to the interrupt stack. */
#define ON_INT_STACK(P) (((P) & ~(KERNEL_STACK_SIZE-1)) == int_stack_base)
-extern int timemmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
+extern vm_offset_t timemmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
void inittodr(void);
diff --git a/i386/i386at/pic_isa.c b/i386/i386at/pic_isa.c
index 0b36534e..b0415c24 100644
--- a/i386/i386at/pic_isa.c
+++ b/i386/i386at/pic_isa.c
@@ -42,7 +42,7 @@ void (*ivect[NINTR])() = {
/* 04 */ intnull, /* comintr, ... */
/* 05 */ intnull, /* comintr, wtintr, ... */
/* 06 */ intnull, /* fdintr, ... */
- /* 07 */ prtnull, /* qdintr, ... */
+ /* 07 */ intnull, /* qdintr, ... */
/* 08 */ intnull,
/* 09 */ intnull, /* ether */
@@ -54,10 +54,3 @@ void (*ivect[NINTR])() = {
/* 14 */ intnull, /* hdintr, ... */
/* 15 */ intnull, /* ??? */
};
-
-int intpri[NINTR] = {
- /* 00 */ 0, SPL6, 0, 0,
- /* 04 */ 0, 0, 0, 0,
- /* 08 */ 0, 0, 0, 0,
- /* 12 */ 0, SPL1, 0, 0,
-};
diff --git a/i386/i386at/rtc.c b/i386/i386at/rtc.c
index 01e09772..6e5cdeb3 100644
--- a/i386/i386at/rtc.c
+++ b/i386/i386at/rtc.c
@@ -53,6 +53,10 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <i386/pio.h>
#include <i386at/rtc.h>
+/* Times of day stored in the RTC are currently interpreted as lying between
+ * 1970 and 2070. Update this before 2070, please. */
+#define CENTURY_START 1970
+
static boolean_t first_rtcopen_ever = TRUE;
void
@@ -109,7 +113,24 @@ static int month[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
int
yeartoday(int year)
{
- return((year%4) ? 365 : 366);
+ if (year%4)
+ /* Not divisible by 4, not bissextile */
+ return 365;
+
+ /* Divisible by 4 */
+ if (year % 100)
+ /* Not divisible by 100, bissextile */
+ return 366;
+
+ /* Divisible by 100 */
+ if (year % 400)
+ /* Not divisible by 400, not bissextile */
+ return 365;
+
+ /* Divisible by 400 */
+ /* Rules for 2000 and further have not been officially decided yet.
+ * 2000 was made bissextile. */
+ return 366;
}
int
@@ -146,7 +167,9 @@ readtodc(u_int *tp)
dom = hexdectodec(rtclk.rtc_dom);
mon = hexdectodec(rtclk.rtc_mon);
yr = hexdectodec(rtclk.rtc_yr);
- yr = (yr < 70) ? yr+100 : yr;
+ yr = (yr < CENTURY_START%100) ?
+ yr+CENTURY_START-CENTURY_START%100+100 :
+ yr+CENTURY_START-CENTURY_START%100;
n = sec + 60 * min + 3600 * hr;
n += (dom - 1) * 3600 * 24;
@@ -156,7 +179,8 @@ readtodc(u_int *tp)
for (i = mon - 2; i >= 0; i--)
days += month[i];
month[1] = 28;
- for (i = 70; i < yr; i++)
+ /* Epoch shall be 1970 January 1st */
+ for (i = 1970; i < yr; i++)
days += yeartoday(i);
n += days * 3600 * 24;
@@ -191,6 +215,7 @@ writetodc(void)
n = (time.tv_sec - diff) / (3600 * 24); /* days */
rtclk.rtc_dow = (n + 4) % 7; /* 1/1/70 is Thursday */
+ /* Epoch shall be 1970 January 1st */
for (j = 1970, i = yeartoday(j); n >= i; j++, i = yeartoday(j))
n -= i;
diff --git a/i386/include/mach/i386/mach_i386.defs b/i386/include/mach/i386/mach_i386.defs
index 0703d59a..a8cb91ce 100644
--- a/i386/include/mach/i386/mach_i386.defs
+++ b/i386/include/mach/i386/mach_i386.defs
@@ -51,9 +51,7 @@ type io_perm_t = mach_port_t
#if KERNEL_SERVER
intran: io_perm_t convert_port_to_io_perm(mach_port_t)
outtran: mach_port_t convert_io_perm_to_port(io_perm_t)
-#if TODO_REMOVE_ME
destructor: io_perm_deallocate(io_perm_t)
-#endif
#endif /* KERNEL_SERVER */
;
diff --git a/i386/include/mach/i386/mach_i386_types.h b/i386/include/mach/i386/mach_i386_types.h
index b0552809..f003636d 100644
--- a/i386/include/mach/i386/mach_i386_types.h
+++ b/i386/include/mach/i386/mach_i386_types.h
@@ -40,6 +40,7 @@ struct descriptor {
typedef struct descriptor descriptor_t;
typedef struct descriptor *descriptor_list_t;
+typedef const struct descriptor *const_descriptor_list_t;
/*
* i386 I/O port
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 505b2063..c55b8f2d 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -83,9 +83,14 @@
#include <i386/proc_reg.h>
#include <i386/locore.h>
#include <i386/model_dep.h>
+#include <i386/spl.h>
#include <i386at/biosmem.h>
#include <i386at/model_dep.h>
+#if NCPUS > 1
+#include <i386/mp_desc.h>
+#endif
+
#ifdef MACH_PSEUDO_PHYS
#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = pte_entry?pa_to_ma(pte_entry):0;
#else /* MACH_PSEUDO_PHYS */
@@ -543,7 +548,7 @@ vm_offset_t pmap_map_bd(
if (prot & VM_PROT_WRITE)
template |= INTEL_PTE_WRITE;
- PMAP_READ_LOCK(pmap, spl);
+ PMAP_READ_LOCK(kernel_pmap, spl);
while (start < end) {
pte = pmap_pte(kernel_pmap, virt);
if (pte == PT_ENTRY_NULL)
@@ -572,7 +577,7 @@ vm_offset_t pmap_map_bd(
if (n != i)
panic("couldn't pmap_map_bd\n");
#endif /* MACH_PV_PAGETABLES */
- PMAP_READ_UNLOCK(pmap, spl);
+ PMAP_READ_UNLOCK(kernel_pmap, spl);
return(virt);
}
@@ -691,7 +696,7 @@ void pmap_bootstrap(void)
l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn(lin2pdenum(la - VM_MIN_KERNEL_ADDRESS) * NPTES + j)) << PAGE_SHIFT) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
pmap_set_page_readonly_init(l1_map[n_l1map]);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (l1_map[n_l1map])))
- panic("couldn't pin page %p(%p)", l1_map[n_l1map], (vm_offset_t) kv_to_ma (l1_map[n_l1map]));
+ panic("couldn't pin page %p(%lx)", l1_map[n_l1map], (vm_offset_t) kv_to_ma (l1_map[n_l1map]));
update.ptr = kv_to_ma(l2_map);
update.val = kv_to_ma(l1_map[n_l1map]) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
hyp_mmu_update(kv_to_la(&update), 1, kv_to_la(&n), DOMID_SELF);
@@ -797,7 +802,7 @@ void pmap_bootstrap(void)
#ifdef MACH_PV_PAGETABLES
pmap_set_page_readonly_init(ptable);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (ptable)))
- panic("couldn't pin page %p(%p)\n", ptable, (vm_offset_t) kv_to_ma (ptable));
+ panic("couldn't pin page %p(%lx)\n", ptable, (vm_offset_t) kv_to_ma (ptable));
#endif /* MACH_PV_PAGETABLES */
}
}
@@ -815,10 +820,10 @@ void pmap_set_page_readwrite(void *_vaddr) {
vm_offset_t paddr = kvtophys(vaddr);
vm_offset_t canon_vaddr = phystokv(paddr);
if (hyp_do_update_va_mapping (kvtolin(vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID | INTEL_PTE_WRITE, UVMF_NONE))
- panic("couldn't set hiMMU readwrite for addr %p(%p)\n", vaddr, (vm_offset_t) pa_to_ma (paddr));
+ panic("couldn't set hiMMU readwrite for addr %lx(%lx)\n", vaddr, (vm_offset_t) pa_to_ma (paddr));
if (canon_vaddr != vaddr)
if (hyp_do_update_va_mapping (kvtolin(canon_vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID | INTEL_PTE_WRITE, UVMF_NONE))
- panic("couldn't set hiMMU readwrite for paddr %p(%p)\n", canon_vaddr, (vm_offset_t) pa_to_ma (paddr));
+ panic("couldn't set hiMMU readwrite for paddr %lx(%lx)\n", canon_vaddr, (vm_offset_t) pa_to_ma (paddr));
}
/* Set a page read only (so as to pin it for instance) */
@@ -828,12 +833,12 @@ void pmap_set_page_readonly(void *_vaddr) {
vm_offset_t canon_vaddr = phystokv(paddr);
if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID) {
if (hyp_do_update_va_mapping (kvtolin(vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID, UVMF_NONE))
- panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) pa_to_ma (paddr));
+ panic("couldn't set hiMMU readonly for vaddr %lx(%lx)\n", vaddr, (vm_offset_t) pa_to_ma (paddr));
}
if (canon_vaddr != vaddr &&
*pmap_pde(kernel_pmap, canon_vaddr) & INTEL_PTE_VALID) {
if (hyp_do_update_va_mapping (kvtolin(canon_vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID, UVMF_NONE))
- panic("couldn't set hiMMU readonly for vaddr %p canon_vaddr %p paddr %p (%p)\n", vaddr, canon_vaddr, paddr, (vm_offset_t) pa_to_ma (paddr));
+ panic("couldn't set hiMMU readonly for vaddr %lx canon_vaddr %lx paddr %lx (%lx)\n", vaddr, canon_vaddr, paddr, (vm_offset_t) pa_to_ma (paddr));
}
}
@@ -852,12 +857,12 @@ void pmap_set_page_readonly_init(void *_vaddr) {
/* Modify our future kernel map (can't use update_va_mapping for this)... */
if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID) {
if (!hyp_mmu_update_la (kvtolin(vaddr), pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID))
- panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
+ panic("couldn't set hiMMU readonly for vaddr %lx(%lx)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
}
/* ... and the bootstrap map. */
if (*pte & INTEL_PTE_VALID) {
if (hyp_do_update_va_mapping (vaddr, pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID, UVMF_NONE))
- panic("couldn't set MMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
+ panic("couldn't set MMU readonly for vaddr %lx(%lx)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
}
}
@@ -869,7 +874,7 @@ void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
unsigned j;
#endif /* PAE */
if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, kv_to_mfn(base)))
- panic("pmap_clear_bootstrap_pagetable: couldn't unpin page %p(%p)\n", base, (vm_offset_t) kv_to_ma(base));
+ panic("pmap_clear_bootstrap_pagetable: couldn't unpin page %p(%lx)\n", base, (vm_offset_t) kv_to_ma(base));
#if PAE
for (j = 0; j < PDPNUM; j++)
{
@@ -1001,8 +1006,7 @@ void pmap_init(void)
KMEM_CACHE_PHYSMEM);
#if PAE
kmem_cache_init(&pdpt_cache, "pdpt",
- PDPNUM * sizeof(pt_entry_t),
- PDPNUM * sizeof(pt_entry_t), NULL,
+ INTEL_PGBYTES, INTEL_PGBYTES, NULL,
KMEM_CACHE_PHYSMEM);
#endif
s = (vm_size_t) sizeof(struct pv_entry);
@@ -1102,7 +1106,7 @@ void pmap_map_mfn(void *_addr, unsigned long mfn) {
#ifdef MACH_PV_PAGETABLES
pmap_set_page_readonly((void*) ptp);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, pa_to_mfn(ptp)))
- panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
+ panic("couldn't pin page %lx(%lx)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
#endif /* MACH_PV_PAGETABLES */
pdp = pmap_pde(kernel_pmap, addr);
@@ -1111,7 +1115,7 @@ void pmap_map_mfn(void *_addr, unsigned long mfn) {
pa_to_pte(kv_to_ma(ptp)) | INTEL_PTE_VALID
| INTEL_PTE_USER
| INTEL_PTE_WRITE))
- panic("%s:%d could not set pde %p(%p) to %p(%p)\n",__FILE__,__LINE__,kvtophys((vm_offset_t)pdp),(vm_offset_t) kv_to_ma(pdp), ptp, (vm_offset_t) pa_to_ma(ptp));
+ panic("%s:%d could not set pde %llx(%lx) to %lx(%lx)\n",__FILE__,__LINE__,kvtophys((vm_offset_t)pdp),(vm_offset_t) kv_to_ma(pdp), ptp, (vm_offset_t) pa_to_ma(ptp));
#else /* MACH_PV_PAGETABLES */
*pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
| INTEL_PTE_USER
@@ -1122,7 +1126,7 @@ void pmap_map_mfn(void *_addr, unsigned long mfn) {
#ifdef MACH_PV_PAGETABLES
if (!hyp_mmu_update_pte(kv_to_ma(pte), ma | INTEL_PTE_VALID | INTEL_PTE_WRITE))
- panic("%s:%d could not set pte %p(%p) to %p(%p)\n",__FILE__,__LINE__,pte,(vm_offset_t) kv_to_ma(pte), ma, ma_to_pa(ma));
+ panic("%s:%d could not set pte %p(%lx) to %llx(%llx)\n",__FILE__,__LINE__,pte,(vm_offset_t) kv_to_ma(pte), ma, ma_to_pa(ma));
#else /* MACH_PV_PAGETABLES */
/* Note: in this case, mfn is actually a pfn. */
WRITE_PTE(pte, ma | INTEL_PTE_VALID | INTEL_PTE_WRITE);
@@ -1318,7 +1322,7 @@ void pmap_destroy(pmap_t p)
vm_page_lock_queues();
#ifdef MACH_PV_PAGETABLES
if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, pa_to_mfn(pa)))
- panic("pmap_destroy: couldn't unpin page %p(%p)\n", pa, (vm_offset_t) kv_to_ma(pa));
+ panic("pmap_destroy: couldn't unpin page %llx(%lx)\n", pa, (vm_offset_t) kv_to_ma(pa));
pmap_set_page_readwrite((void*) phystokv(pa));
#endif /* MACH_PV_PAGETABLES */
vm_page_free(m);
@@ -1370,7 +1374,7 @@ void pmap_reference(pmap_t p)
* Assumes that the pte-page exists.
*/
-/* static */
+static
void pmap_remove_range(
pmap_t pmap,
vm_offset_t va,
@@ -1533,7 +1537,6 @@ void pmap_remove(
vm_offset_t e)
{
int spl;
- pt_entry_t *pde;
pt_entry_t *spte, *epte;
vm_offset_t l;
vm_offset_t _s = s;
@@ -1543,8 +1546,9 @@ void pmap_remove(
PMAP_READ_LOCK(map, spl);
- pde = pmap_pde(map, s);
while (s < e) {
+ pt_entry_t *pde = pmap_pde(map, s);
+
l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
if (l > e)
l = e;
@@ -1555,7 +1559,6 @@ void pmap_remove(
pmap_remove_range(map, s, spte, epte);
}
s = l;
- pde++;
}
PMAP_UPDATE_TLBS(map, _s, e);
@@ -1858,7 +1861,7 @@ void pmap_enter(
phys_addr_t old_pa;
assert(pa != vm_page_fictitious_addr);
-if (pmap_debug) printf("pmap(%lx, %lx)\n", v, pa);
+ if (pmap_debug) printf("pmap(%lx, %llx)\n", v, pa);
if (pmap == PMAP_NULL)
return;
@@ -1921,7 +1924,7 @@ Retry:
* Would have to enter the new page-table page in
* EVERY pmap.
*/
- panic("pmap_expand kernel pmap to %#x", v);
+ panic("pmap_expand kernel pmap to %#lx", v);
}
/*
@@ -1953,24 +1956,23 @@ Retry:
* Enter the new page table page in the page directory.
*/
i = ptes_per_vm_page;
- /*XX pdp = &pmap->dirbase[pdenum(v) & ~(i-1)];*/
pdp = pmap_pde(pmap, v);
do {
#ifdef MACH_PV_PAGETABLES
pmap_set_page_readonly((void *) ptp);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn(ptp)))
- panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
+ panic("couldn't pin page %lx(%lx)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
if (!hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdp)),
pa_to_pte(pa_to_ma(kvtophys(ptp))) | INTEL_PTE_VALID
| INTEL_PTE_USER
| INTEL_PTE_WRITE))
- panic("%s:%d could not set pde %p(%p,%p) to %p(%p,%p) %p\n",__FILE__,__LINE__, pdp, kvtophys((vm_offset_t)pdp), (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)pdp)), ptp, kvtophys(ptp), (vm_offset_t) pa_to_ma(kvtophys(ptp)), (vm_offset_t) pa_to_pte(kv_to_ma(ptp)));
+ panic("%s:%d could not set pde %p(%llx,%lx) to %lx(%llx,%lx) %lx\n",__FILE__,__LINE__, pdp, kvtophys((vm_offset_t)pdp), (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)pdp)), ptp, kvtophys(ptp), (vm_offset_t) pa_to_ma(kvtophys(ptp)), (vm_offset_t) pa_to_pte(kv_to_ma(ptp)));
#else /* MACH_PV_PAGETABLES */
*pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
| INTEL_PTE_USER
| INTEL_PTE_WRITE;
#endif /* MACH_PV_PAGETABLES */
- pdp++;
+ pdp++; /* Note: This is safe b/c we stay in one page. */
ptp += INTEL_PGBYTES;
} while (--i > 0);
@@ -2016,7 +2018,7 @@ Retry:
template |= INTEL_PTE_MOD;
#ifdef MACH_PV_PAGETABLES
if (!hyp_mmu_update_pte(kv_to_ma(pte), pa_to_ma(template)))
- panic("%s:%d could not set pte %p to %p\n",__FILE__,__LINE__,pte,template);
+ panic("%s:%d could not set pte %p to %llx\n",__FILE__,__LINE__,pte,template);
#else /* MACH_PV_PAGETABLES */
WRITE_PTE(pte, template)
#endif /* MACH_PV_PAGETABLES */
@@ -2126,7 +2128,7 @@ Retry:
do {
#ifdef MACH_PV_PAGETABLES
if (!(hyp_mmu_update_pte(kv_to_ma(pte), pa_to_ma(template))))
- panic("%s:%d could not set pte %p to %p\n",__FILE__,__LINE__,pte,template);
+ panic("%s:%d could not set pte %p to %llx\n",__FILE__,__LINE__,pte,template);
#else /* MACH_PV_PAGETABLES */
WRITE_PTE(pte, template)
#endif /* MACH_PV_PAGETABLES */
@@ -2337,7 +2339,7 @@ void pmap_collect(pmap_t p)
if (!(hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdep++)), 0)))
panic("%s:%d could not clear pde %p\n",__FILE__,__LINE__,pdep-1);
if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, kv_to_mfn(ptable)))
- panic("couldn't unpin page %p(%p)\n", ptable, (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)ptable)));
+ panic("couldn't unpin page %p(%lx)\n", ptable, (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)ptable)));
pmap_set_page_readwrite(ptable);
#else /* MACH_PV_PAGETABLES */
*pdep++ = 0;
@@ -2386,7 +2388,7 @@ void pmap_collect(pmap_t p)
*/
#if 0
void pmap_activate(my_pmap, th, my_cpu)
- register pmap_t my_pmap;
+ pmap_t my_pmap;
thread_t th;
int my_cpu;
{
@@ -2429,9 +2431,9 @@ pmap_t pmap_kernel()
*/
#if 0
pmap_zero_page(phys)
- register vm_offset_t phys;
+ vm_offset_t phys;
{
- register int i;
+ int i;
assert(phys != vm_page_fictitious_addr);
i = PAGE_SIZE / INTEL_PGBYTES;
@@ -2555,7 +2557,7 @@ phys_attribute_clear(
do {
#ifdef MACH_PV_PAGETABLES
if (!(hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~bits)))
- panic("%s:%d could not clear bits %lx from pte %p\n",__FILE__,__LINE__,bits,pte);
+ panic("%s:%d could not clear bits %x from pte %p\n",__FILE__,__LINE__,bits,pte);
#else /* MACH_PV_PAGETABLES */
*pte &= ~bits;
#endif /* MACH_PV_PAGETABLES */
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index ee600cd5..5fa2a0c4 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -461,12 +461,6 @@ extern void pmap_copy_page (phys_addr_t, phys_addr_t);
*/
extern phys_addr_t kvtophys (vm_offset_t);
-void pmap_remove_range(
- pmap_t pmap,
- vm_offset_t va,
- pt_entry_t *spte,
- pt_entry_t *epte);
-
#if NCPUS > 1
void signal_cpus(
cpu_set use_list,
diff --git a/include/device/bpf.h b/include/device/bpf.h
index 7bb5e106..abc2d777 100644
--- a/include/device/bpf.h
+++ b/include/device/bpf.h
@@ -68,8 +68,6 @@
#ifndef _DEVICE_BPF_H_
#define _DEVICE_BPF_H_
-#include <sys/types.h> /* u_short */
-
/*
* Alignment macros. BPF_WORDALIGN rounds up to the next
* even multiple of BPF_ALIGNMENT.
@@ -89,8 +87,8 @@
* It has nothing to do with the source code version.
*/
struct bpf_version {
- u_short bv_major;
- u_short bv_minor;
+ unsigned short bv_major;
+ unsigned short bv_minor;
};
/* Current version number. */
#define BPF_MAJOR_VERSION 1
diff --git a/include/mach/gnumach.defs b/include/mach/gnumach.defs
index b484accc..97ab573c 100644
--- a/include/mach/gnumach.defs
+++ b/include/mach/gnumach.defs
@@ -151,3 +151,17 @@ routine vm_wire_all(
host : mach_port_t;
task : vm_task_t;
flags : vm_wire_t);
+
+routine vm_object_sync(
+ object : memory_object_name_t;
+ offset : vm_offset_t;
+ size : vm_size_t;
+ should_flush : boolean_t;
+ should_return : boolean_t;
+ should_iosync : boolean_t);
+
+routine vm_msync(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ sync_flags : vm_sync_t);
diff --git a/include/mach/mach_types.defs b/include/mach/mach_types.defs
index 8e68d385..a0e9241c 100644
--- a/include/mach/mach_types.defs
+++ b/include/mach/mach_types.defs
@@ -118,6 +118,7 @@ type vm_inherit_t = int;
type vm_statistics_data_t = struct[13] of integer_t;
type vm_machine_attribute_t = int;
type vm_machine_attribute_val_t = int;
+type vm_sync_t = int;
type thread_info_t = array[*:1024] of integer_t;
type thread_basic_info_data_t = struct[11] of integer_t;
diff --git a/include/mach/mach_types.h b/include/mach/mach_types.h
index 65164a99..57f8f22d 100644
--- a/include/mach/mach_types.h
+++ b/include/mach/mach_types.h
@@ -54,6 +54,7 @@
#include <mach/vm_statistics.h>
#include <mach/vm_cache_statistics.h>
#include <mach/vm_wire.h>
+#include <mach/vm_sync.h>
#ifdef MACH_KERNEL
#include <kern/task.h> /* for task_array_t */
diff --git a/include/mach/memory_object.defs b/include/mach/memory_object.defs
index 0d3c2786..4afd67b2 100644
--- a/include/mach/memory_object.defs
+++ b/include/mach/memory_object.defs
@@ -209,7 +209,7 @@ simpleroutine memory_object_data_unlock(
skip; /* was: memory_object_data_write */
/*
- * Indicate that a previous memory_object_lock_reqeust has been
+ * Indicate that a previous memory_object_lock_request has been
* completed. Note that this call is made on whatever
* port is specified in the memory_object_lock_request; that port
* need not be the memory object port itself.
diff --git a/include/mach/port.h b/include/mach/port.h
index 3036a921..e77e5c38 100644
--- a/include/mach/port.h
+++ b/include/mach/port.h
@@ -41,6 +41,7 @@
typedef vm_offset_t mach_port_t;
typedef mach_port_t *mach_port_array_t;
+typedef const mach_port_t *const_mach_port_array_t;
typedef int *rpc_signature_info_t;
/*
diff --git a/include/mach/thread_info.h b/include/mach/thread_info.h
index 60e8ba7f..569c8c84 100644
--- a/include/mach/thread_info.h
+++ b/include/mach/thread_info.h
@@ -107,6 +107,7 @@ struct thread_sched_info {
integer_t cur_priority; /* current priority */
/*boolean_t*/integer_t depressed; /* depressed ? */
integer_t depress_priority; /* priority depressed from */
+ integer_t last_processor; /* last processor used by the thread */
};
typedef struct thread_sched_info thread_sched_info_data_t;
diff --git a/include/mach/vm_sync.h b/include/mach/vm_sync.h
new file mode 100644
index 00000000..0c7451c4
--- /dev/null
+++ b/include/mach/vm_sync.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Free Software Foundation
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * All Rights Reserved.
+ */
+
+#ifndef _MACH_VM_SYNC_H_
+#define _MACH_VM_SYNC_H_
+
+/*
+ * Types defined:
+ *
+ * vm_sync_t VM synchronization flags
+ */
+
+typedef int vm_sync_t;
+
+/*
+ * Synchronization values
+ */
+
+#define VM_SYNC_ASYNCHRONOUS ((vm_sync_t) 0x01)
+#define VM_SYNC_SYNCHRONOUS ((vm_sync_t) 0x02)
+#define VM_SYNC_INVALIDATE ((vm_sync_t) 0x04)
+#if 0
+/* Not supported yet. */
+#define VM_SYNC_KILLPAGES ((vm_sync_t) 0x08)
+#define VM_SYNC_DEACTIVATE ((vm_sync_t) 0x10)
+#define VM_SYNC_CONTIGUOUS ((vm_sync_t) 0x20)
+#define VM_SYNC_REUSABLEPAGES ((vm_sync_t) 0x40)
+#endif
+
+#endif /* _MACH_VM_SYNC_H_ */
diff --git a/ipc/ipc_kmsg.h b/ipc/ipc_kmsg.h
index 393c0392..c6cd77f0 100644
--- a/ipc/ipc_kmsg.h
+++ b/ipc/ipc_kmsg.h
@@ -147,7 +147,7 @@ MACRO_END
#define ikm_free(kmsg) \
MACRO_BEGIN \
- register vm_size_t _size = (kmsg)->ikm_size; \
+ vm_size_t _size = (kmsg)->ikm_size; \
\
if ((integer_t)_size > 0) \
kfree((vm_offset_t) (kmsg), _size); \
@@ -196,7 +196,7 @@ extern ipc_kmsg_t ipc_kmsg_queue_next(
#define ipc_kmsg_rmqueue_first_macro(queue, kmsg) \
MACRO_BEGIN \
- register ipc_kmsg_t _next; \
+ ipc_kmsg_t _next; \
\
assert((queue)->ikmq_base == (kmsg)); \
\
@@ -205,7 +205,7 @@ MACRO_BEGIN \
assert((kmsg)->ikm_prev == (kmsg)); \
(queue)->ikmq_base = IKM_NULL; \
} else { \
- register ipc_kmsg_t _prev = (kmsg)->ikm_prev; \
+ ipc_kmsg_t _prev = (kmsg)->ikm_prev; \
\
(queue)->ikmq_base = _next; \
_next->ikm_prev = _prev; \
@@ -216,14 +216,14 @@ MACRO_END
#define ipc_kmsg_enqueue_macro(queue, kmsg) \
MACRO_BEGIN \
- register ipc_kmsg_t _first = (queue)->ikmq_base; \
+ ipc_kmsg_t _first = (queue)->ikmq_base; \
\
if (_first == IKM_NULL) { \
(queue)->ikmq_base = (kmsg); \
(kmsg)->ikm_next = (kmsg); \
(kmsg)->ikm_prev = (kmsg); \
} else { \
- register ipc_kmsg_t _last = _first->ikm_prev; \
+ ipc_kmsg_t _last = _first->ikm_prev; \
\
(kmsg)->ikm_next = _first; \
(kmsg)->ikm_prev = _last; \
diff --git a/ipc/mach_port.c b/ipc/mach_port.c
index 5cc39984..b1379b79 100644
--- a/ipc/mach_port.c
+++ b/ipc/mach_port.c
@@ -550,7 +550,7 @@ mach_port_destroy(
kr = ipc_right_lookup_write(space, name, &entry);
if (kr != KERN_SUCCESS) {
if (MACH_PORT_VALID (name) && space == current_space()) {
- printf("task %.*s destroying a bogus port %lu, most probably a bug.\n", sizeof current_task()->name, current_task()->name, name);
+ printf("task %.*s destroying a bogus port %lu, most probably a bug.\n", sizeof current_task()->name, current_task()->name, (unsigned long) name);
if (mach_port_deallocate_debug)
SoftDebugger("mach_port_deallocate");
}
@@ -594,7 +594,7 @@ mach_port_deallocate(
kr = ipc_right_lookup_write(space, name, &entry);
if (kr != KERN_SUCCESS) {
if (MACH_PORT_VALID (name) && space == current_space()) {
- printf("task %.*s deallocating a bogus port %lu, most probably a bug.\n", sizeof current_task()->name, current_task()->name, name);
+ printf("task %.*s deallocating a bogus port %lu, most probably a bug.\n", sizeof current_task()->name, current_task()->name, (unsigned long) name);
if (mach_port_deallocate_debug)
SoftDebugger("mach_port_deallocate");
}
@@ -718,7 +718,7 @@ mach_port_mod_refs(
if (MACH_PORT_VALID (name) && space == current_space()) {
printf("task %.*s %screasing a bogus port "
"%lu by %d, most probably a bug.\n",
- sizeof current_task()->name,
+ (int) (sizeof current_task()->name),
current_task()->name,
delta < 0 ? "de" : "in", name,
delta < 0 ? -delta : delta);
@@ -1566,3 +1566,13 @@ mach_port_clear_protected_payload(
ip_unlock(port);
return KERN_SUCCESS;
}
+
+#if MACH_KDB
+
+void
+db_debug_port_references (boolean_t enable)
+{
+ mach_port_deallocate_debug = enable;
+}
+
+#endif /* MACH_KDB */
diff --git a/ipc/mach_port.h b/ipc/mach_port.h
index c4d9a1c3..073f7946 100644
--- a/ipc/mach_port.h
+++ b/ipc/mach_port.h
@@ -65,4 +65,8 @@ mach_port_get_receive_status(
mach_port_t name,
mach_port_status_t *statusp);
+#if MACH_KDB
+void db_debug_port_references (boolean_t enable);
+#endif /* MACH_KDB */
+
#endif /* _IPC_MACH_PORT_H_ */
diff --git a/kern/ast.c b/kern/ast.c
index 2772ed3e..d2289344 100644
--- a/kern/ast.c
+++ b/kern/ast.c
@@ -227,7 +227,7 @@ ast_check(void)
break;
default:
- panic("ast_check: Bad processor state (cpu %d processor %08x) state: %d",
+ panic("ast_check: Bad processor state (cpu %d processor %p) state: %d",
mycpu, myprocessor, myprocessor->state);
}
diff --git a/kern/ast.h b/kern/ast.h
index 7d472be9..8895ffbc 100644
--- a/kern/ast.h
+++ b/kern/ast.h
@@ -41,6 +41,7 @@
*/
#include "cpu_number.h"
+#include <kern/kern_types.h>
#include <kern/macros.h>
#include <machine/ast.h>
@@ -131,4 +132,8 @@ extern void ast_init (void);
extern void ast_check (void);
+#if NCPUS > 1
+extern void cause_ast_check(const processor_t processor);
+#endif
+
#endif /* _KERN_AST_H_ */
diff --git a/kern/atomic.h b/kern/atomic.h
new file mode 100644
index 00000000..00da1645
--- /dev/null
+++ b/kern/atomic.h
@@ -0,0 +1,54 @@
+/* Copyright (C) 2017 Free Software Foundation, Inc.
+ Contributed by Agustina Arzille <avarzille@riseup.net>, 2017.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either
+ version 2 of the license, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _KERN_ATOMIC_H_
+#define _KERN_ATOMIC_H_ 1
+
+/* Atomically compare *PTR with EXP and set it to NVAL if they're equal.
+ * Evaluates to a boolean, indicating whether the comparison was successful.*/
+#define __atomic_cas_helper(ptr, exp, nval, mo) \
+ ({ \
+ typeof(exp) __e = (exp); \
+ __atomic_compare_exchange_n ((ptr), &__e, (nval), 0, \
+ __ATOMIC_##mo, __ATOMIC_RELAXED); \
+ })
+
+#define atomic_cas_acq(ptr, exp, nval) \
+ __atomic_cas_helper (ptr, exp, nval, ACQUIRE)
+
+#define atomic_cas_rel(ptr, exp, nval) \
+ __atomic_cas_helper (ptr, exp, nval, RELEASE)
+
+#define atomic_cas_seq(ptr, exp, nval) \
+ __atomic_cas_helper (ptr, exp, nval, SEQ_CST)
+
+/* Atomically exchange the value of *PTR with VAL, evaluating to
+ * its previous value. */
+#define __atomic_swap_helper(ptr, val, mo) \
+ __atomic_exchange_n ((ptr), (val), __ATOMIC_##mo)
+
+#define atomic_swap_acq(ptr, val) \
+ __atomic_swap_helper (ptr, val, ACQUIRE)
+
+#define atomic_swap_rel(ptr, val) \
+ __atomic_swap_helper (ptr, val, RELEASE)
+
+#define atomic_swap_seq(ptr, val) \
+ __atomic_swap_helper (ptr, val, SEQ_CST)
+
+#endif
diff --git a/kern/bootstrap.c b/kern/bootstrap.c
index 7398ea44..8b88d17d 100644
--- a/kern/bootstrap.c
+++ b/kern/bootstrap.c
@@ -180,6 +180,12 @@ void bootstrap_create(void)
if (losers)
panic ("cannot set boot-script variable device-port: %s",
boot_script_error_string (losers));
+ losers = boot_script_set_variable
+ ("kernel-task", VAL_PORT,
+ (long) kernel_task->itk_self);
+ if (losers)
+ panic ("cannot set boot-script variable kernel-task: %s",
+ boot_script_error_string (losers));
losers = boot_script_set_variable ("kernel-command-line", VAL_STR,
(long) kernel_cmdline);
@@ -487,7 +493,7 @@ read_exec(void *handle, vm_offset_t file_ofs, vm_size_t file_size,
static void copy_bootstrap(void *e, exec_info_t *boot_exec_info)
{
- //register vm_map_t user_map = current_task()->map;
+ /* vm_map_t user_map = current_task()->map; */
int err;
if ((err = exec_load(boot_read, read_exec, e, boot_exec_info)))
@@ -813,7 +819,7 @@ boot_script_free (void *ptr, unsigned int size)
int
boot_script_task_create (struct cmd *cmd)
{
- kern_return_t rc = task_create(TASK_NULL, FALSE, &cmd->task);
+ kern_return_t rc = task_create_kernel(TASK_NULL, FALSE, &cmd->task);
if (rc)
{
printf("boot_script_task_create failed with %x\n", rc);
diff --git a/kern/cpu_number.h b/kern/cpu_number.h
index 650f4042..5d3e4bd1 100644
--- a/kern/cpu_number.h
+++ b/kern/cpu_number.h
@@ -37,7 +37,8 @@ int master_cpu; /* 'master' processor - keeps time */
/* cpu number is always 0 on a single processor system */
#define cpu_number() (0)
+#endif /* NCPUS == 1 */
+
#define CPU_L1_SIZE (1 << CPU_L1_SHIFT)
-#endif /* NCPUS == 1 */
#endif /* _KERN_CPU_NUMBER_H_ */
diff --git a/kern/gsync.c b/kern/gsync.c
index e70e1199..e73a6cf0 100644
--- a/kern/gsync.c
+++ b/kern/gsync.c
@@ -17,36 +17,61 @@
*/
#include <kern/gsync.h>
+#include <kern/kmutex.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
-#include <kern/lock.h>
#include <kern/list.h>
#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
/* An entry in the global hash table. */
struct gsync_hbucket
{
struct list entries;
- decl_simple_lock_data (, lock)
+ struct kmutex lock;
};
/* A key used to uniquely identify an address that a thread is
* waiting on. Its members' values depend on whether said
- * address is shared or task-local. */
-struct gsync_key
+ * address is shared or task-local. Note that different types of keys
+ * should never compare equal, since a task map should never have
+ * the same address as a VM object. */
+union gsync_key
{
- unsigned long u;
- unsigned long v;
+ struct
+ {
+ vm_map_t map;
+ vm_offset_t addr;
+ } local;
+
+ struct
+ {
+ vm_object_t obj;
+ vm_offset_t off;
+ } shared;
+
+ struct
+ {
+ unsigned long u;
+ unsigned long v;
+ } any;
};
/* A thread that is blocked on an address with 'gsync_wait'. */
struct gsync_waiter
{
struct list link;
- struct gsync_key key;
+ union gsync_key key;
thread_t waiter;
};
+/* Needed data for temporary mappings. */
+struct vm_args
+{
+ vm_object_t obj;
+ vm_offset_t off;
+};
+
#define GSYNC_NBUCKETS 512
static struct gsync_hbucket gsync_buckets[GSYNC_NBUCKETS];
@@ -56,97 +81,93 @@ void gsync_setup (void)
for (i = 0; i < GSYNC_NBUCKETS; ++i)
{
list_init (&gsync_buckets[i].entries);
- simple_lock_init (&gsync_buckets[i].lock);
+ kmutex_init (&gsync_buckets[i].lock);
}
}
/* Convenience comparison functions for gsync_key's. */
static inline int
-gsync_key_eq (const struct gsync_key *lp,
- const struct gsync_key *rp)
+gsync_key_eq (const union gsync_key *lp,
+ const union gsync_key *rp)
{
- return (lp->u == rp->u && lp->v == rp->v);
+ return (lp->any.u == rp->any.u && lp->any.v == rp->any.v);
}
static inline int
-gsync_key_lt (const struct gsync_key *lp,
- const struct gsync_key *rp)
+gsync_key_lt (const union gsync_key *lp,
+ const union gsync_key *rp)
{
- return (lp->u < rp->u || (lp->u == rp->u && lp->v < rp->v));
+ return (lp->any.u < rp->any.u ||
+ (lp->any.u == rp->any.u && lp->any.v < rp->any.v));
}
#define MIX2_LL(x, y) ((((x) << 5) | ((x) >> 27)) ^ (y))
static inline unsigned int
-gsync_key_hash (const struct gsync_key *keyp)
+gsync_key_hash (const union gsync_key *keyp)
{
unsigned int ret = sizeof (void *);
#ifndef __LP64__
- ret = MIX2_LL (ret, keyp->u);
- ret = MIX2_LL (ret, keyp->v);
+ ret = MIX2_LL (ret, keyp->any.u);
+ ret = MIX2_LL (ret, keyp->any.v);
#else
- ret = MIX2_LL (ret, keyp->u & ~0U);
- ret = MIX2_LL (ret, keyp->u >> 32);
- ret = MIX2_LL (ret, keyp->v & ~0U);
- ret = MIX2_LL (ret, keyp->v >> 32);
+ ret = MIX2_LL (ret, keyp->any.u & ~0U);
+ ret = MIX2_LL (ret, keyp->any.u >> 32);
+ ret = MIX2_LL (ret, keyp->any.v & ~0U);
+ ret = MIX2_LL (ret, keyp->any.v >> 32);
#endif
return (ret);
}
-/* Test if the passed VM Map can access the address ADDR. The
- * parameter FLAGS is used to specify the width and protection
- * of the address. */
+/* Perform a VM lookup for the address in the map. The FLAGS
+ * parameter is used to specify some attributes for the address,
+ * such as protection. Place the corresponding VM object/offset pair
+ * in VAP. Returns 0 if successful, -1 otherwise. */
static int
-valid_access_p (vm_map_t map, vm_offset_t addr, int flags)
+probe_address (vm_map_t map, vm_offset_t addr,
+ int flags, struct vm_args *vap)
{
vm_prot_t prot = VM_PROT_READ |
((flags & GSYNC_MUTATE) ? VM_PROT_WRITE : 0);
- vm_offset_t size = sizeof (unsigned int) *
- ((flags & GSYNC_QUAD) ? 2 : 1);
+ vm_map_version_t ver;
+ vm_prot_t rprot;
+ boolean_t wired_p;
+
+ if (vm_map_lookup (&map, addr, prot, &ver,
+ &vap->obj, &vap->off, &rprot, &wired_p) != KERN_SUCCESS)
+ return (-1);
+ else if ((rprot & prot) != prot)
+ {
+ vm_object_unlock (vap->obj);
+ return (-1);
+ }
- vm_map_entry_t entry;
- return (vm_map_lookup_entry (map, addr, &entry) &&
- entry->vme_end >= addr + size &&
- (prot & entry->protection) == prot);
+ return (0);
}
-/* Given a task and an address, initialize the key at *KEYP and
- * return the corresponding bucket in the global hash table. */
+/* Initialize the key with its needed members, depending on whether the
+ * address is local or shared. Also stores the VM object and offset inside
+ * the argument VAP for future use. */
static int
-gsync_fill_key (task_t task, vm_offset_t addr,
- int flags, struct gsync_key *keyp)
+gsync_prepare_key (task_t task, vm_offset_t addr, int flags,
+ union gsync_key *keyp, struct vm_args *vap)
{
- if (flags & GSYNC_SHARED)
+ if (probe_address (task->map, addr, flags, vap) < 0)
+ return (-1);
+ else if (flags & GSYNC_SHARED)
{
/* For a shared address, we need the VM object
* and offset as the keys. */
- vm_map_t map = task->map;
- vm_prot_t prot = VM_PROT_READ |
- ((flags & GSYNC_MUTATE) ? VM_PROT_WRITE : 0);
- vm_map_version_t ver;
- vm_prot_t rpr;
- vm_object_t obj;
- vm_offset_t off;
- boolean_t wired_p;
-
- if (unlikely (vm_map_lookup (&map, addr, prot, &ver,
- &obj, &off, &rpr, &wired_p) != KERN_SUCCESS))
- return (-1);
-
- /* The VM object is returned locked. However, we check the
- * address' accessibility later, so we can release it. */
- vm_object_unlock (obj);
-
- keyp->u = (unsigned long)obj;
- keyp->v = (unsigned long)off;
+ keyp->shared.obj = vap->obj;
+ keyp->shared.off = vap->off;
}
else
{
/* Task-local address. The keys are the task's map and
* the virtual address itself. */
- keyp->u = (unsigned long)task->map;
- keyp->v = (unsigned long)addr;
+ keyp->local.map = task->map;
+ keyp->local.addr = addr;
}
return ((int)(gsync_key_hash (keyp) % GSYNC_NBUCKETS));
@@ -160,7 +181,7 @@ node_to_waiter (struct list *nodep)
static inline struct list*
gsync_find_key (const struct list *entries,
- const struct gsync_key *keyp, int *exactp)
+ const union gsync_key *keyp, int *exactp)
{
/* Look for a key that matches. We take advantage of the fact
* that the entries are sorted to break out of the loop as
@@ -182,57 +203,105 @@ gsync_find_key (const struct list *entries,
return (runp);
}
-kern_return_t gsync_wait (task_t task, vm_offset_t addr,
- unsigned int lo, unsigned int hi, natural_t msec, int flags)
+/* Create a temporary mapping in the kernel.*/
+static inline vm_offset_t
+temp_mapping (struct vm_args *vap, vm_offset_t addr, vm_prot_t prot)
{
- if (unlikely (task != current_task()))
- /* Not implemented yet. */
- return (KERN_FAILURE);
+ vm_offset_t paddr = VM_MIN_KERNEL_ADDRESS;
+ /* Adjust the offset for addresses that aren't page-aligned. */
+ vm_offset_t off = vap->off - (addr - trunc_page (addr));
- struct gsync_waiter w;
- int bucket = gsync_fill_key (task, addr, flags, &w.key);
+ if (vm_map_enter (kernel_map, &paddr, PAGE_SIZE,
+ 0, TRUE, vap->obj, off, FALSE, prot, VM_PROT_ALL,
+ VM_INHERIT_DEFAULT) != KERN_SUCCESS)
+ paddr = 0;
+
+ return (paddr);
+}
- if (unlikely (bucket < 0))
+kern_return_t gsync_wait (task_t task, vm_offset_t addr,
+ unsigned int lo, unsigned int hi, natural_t msec, int flags)
+{
+ if (task == 0)
+ return (KERN_INVALID_TASK);
+ else if (addr % sizeof (int) != 0)
return (KERN_INVALID_ADDRESS);
- /* Test that the address is actually valid for the
- * given task. Do so with the read-lock held in order
- * to prevent memory deallocations. */
vm_map_lock_read (task->map);
- struct gsync_hbucket *hbp = gsync_buckets + bucket;
- simple_lock (&hbp->lock);
+ struct gsync_waiter w;
+ struct vm_args va;
+ boolean_t remote = task != current_task ();
+ int bucket = gsync_prepare_key (task, addr, flags, &w.key, &va);
- if (unlikely (!valid_access_p (task->map, addr, flags)))
+ if (bucket < 0)
{
- simple_unlock (&hbp->lock);
vm_map_unlock_read (task->map);
return (KERN_INVALID_ADDRESS);
}
+ else if (remote)
+ /* The VM object is returned locked. However, we are about to acquire
+ * a sleeping lock for a bucket, so we must not hold any simple
+ * locks. To prevent this object from going away, we add a reference
+ * to it when requested. */
+ vm_object_reference_locked (va.obj);
+
+ /* We no longer need the lock on the VM object. */
+ vm_object_unlock (va.obj);
+
+ struct gsync_hbucket *hbp = gsync_buckets + bucket;
+ kmutex_lock (&hbp->lock, FALSE);
/* Before doing any work, check that the expected value(s)
* match the contents of the address. Otherwise, the waiting
* thread could potentially miss a wakeup. */
- if (((unsigned int *)addr)[0] != lo ||
- ((flags & GSYNC_QUAD) &&
- ((unsigned int *)addr)[1] != hi))
+
+ boolean_t equal;
+ if (! remote)
+ equal = ((unsigned int *)addr)[0] == lo &&
+ ((flags & GSYNC_QUAD) == 0 ||
+ ((unsigned int *)addr)[1] == hi);
+ else
{
- simple_unlock (&hbp->lock);
- vm_map_unlock_read (task->map);
- return (KERN_INVALID_ARGUMENT);
+ vm_offset_t paddr = temp_mapping (&va, addr, VM_PROT_READ);
+ if (unlikely (paddr == 0))
+ {
+ kmutex_unlock (&hbp->lock);
+ vm_map_unlock_read (task->map);
+ /* Make sure to remove the reference we added. */
+ vm_object_deallocate (va.obj);
+ return (KERN_MEMORY_FAILURE);
+ }
+
+ vm_offset_t off = addr & (PAGE_SIZE - 1);
+ paddr += off;
+
+ equal = ((unsigned int *)paddr)[0] == lo &&
+ ((flags & GSYNC_QUAD) == 0 ||
+ ((unsigned int *)paddr)[1] == hi);
+
+ paddr -= off;
+
+ /* Note that the call to 'vm_map_remove' will unreference
+ * the VM object, so we don't have to do it ourselves. */
+ vm_map_remove (kernel_map, paddr, paddr + PAGE_SIZE);
}
+ /* Done with the task's map. */
vm_map_unlock_read (task->map);
+ if (! equal)
+ {
+ kmutex_unlock (&hbp->lock);
+ return (KERN_INVALID_ARGUMENT);
+ }
+
/* Look for the first entry in the hash bucket that
* compares strictly greater than this waiter. */
struct list *runp;
list_for_each (&hbp->entries, runp)
- {
- struct gsync_waiter *p = node_to_waiter (runp);
- if (gsync_key_lt (&w.key, &p->key))
- break;
- }
+ if (gsync_key_lt (&w.key, &node_to_waiter(runp)->key))
+ break;
/* Finally, add ourselves to the list and go to sleep. */
list_add (runp->prev, runp, &w.link);
@@ -243,24 +312,23 @@ kern_return_t gsync_wait (task_t task, vm_offset_t addr,
else
thread_will_wait (w.waiter);
- thread_sleep (0, (simple_lock_t)&hbp->lock, TRUE);
+ kmutex_unlock (&hbp->lock);
+ thread_block (thread_no_continuation);
/* We're back. */
- kern_return_t ret = current_thread()->wait_result;
- if (ret != THREAD_AWAKENED)
+ kern_return_t ret = KERN_SUCCESS;
+ if (current_thread()->wait_result != THREAD_AWAKENED)
{
/* We were interrupted or timed out. */
- simple_lock (&hbp->lock);
- if (w.link.next != 0)
+ kmutex_lock (&hbp->lock, FALSE);
+ if (!list_node_unlinked (&w.link))
list_remove (&w.link);
- simple_unlock (&hbp->lock);
+ kmutex_unlock (&hbp->lock);
/* Map the error code. */
- ret = ret == THREAD_INTERRUPTED ?
+ ret = current_thread()->wait_result == THREAD_INTERRUPTED ?
KERN_INTERRUPTED : KERN_TIMEDOUT;
}
- else
- ret = KERN_SUCCESS;
return (ret);
}
@@ -281,34 +349,60 @@ dequeue_waiter (struct list *nodep)
kern_return_t gsync_wake (task_t task,
vm_offset_t addr, unsigned int val, int flags)
{
- if (unlikely (task != current_task()))
- /* Not implemented yet. */
- return (KERN_FAILURE);
-
- struct gsync_key key;
- int bucket = gsync_fill_key (task, addr, flags, &key);
-
- if (unlikely (bucket < 0))
+ if (task == 0)
+ return (KERN_INVALID_TASK);
+ else if (addr % sizeof (int) != 0)
return (KERN_INVALID_ADDRESS);
- kern_return_t ret = KERN_INVALID_ARGUMENT;
-
vm_map_lock_read (task->map);
- struct gsync_hbucket *hbp = gsync_buckets + bucket;
- simple_lock (&hbp->lock);
- if (unlikely (!valid_access_p (task->map, addr, flags)))
+ union gsync_key key;
+ struct vm_args va;
+ int bucket = gsync_prepare_key (task, addr, flags, &key, &va);
+
+ if (bucket < 0)
{
- simple_unlock (&hbp->lock);
vm_map_unlock_read (task->map);
return (KERN_INVALID_ADDRESS);
}
+ else if (current_task () != task && (flags & GSYNC_MUTATE) != 0)
+ /* See above on why we do this. */
+ vm_object_reference_locked (va.obj);
+
+ /* Done with the VM object lock. */
+ vm_object_unlock (va.obj);
+
+ kern_return_t ret = KERN_INVALID_ARGUMENT;
+ struct gsync_hbucket *hbp = gsync_buckets + bucket;
+
+ kmutex_lock (&hbp->lock, FALSE);
if (flags & GSYNC_MUTATE)
- /* Set the contents of the address to the specified value,
- * even if we don't end up waking any threads. Note that
- * the buckets' simple locks give us atomicity. */
- *(unsigned int *)addr = val;
+ {
+ /* Set the contents of the address to the specified value,
+ * even if we don't end up waking any threads. Note that
+ * the buckets' simple locks give us atomicity. */
+
+ if (task != current_task ())
+ {
+ vm_offset_t paddr = temp_mapping (&va, addr,
+ VM_PROT_READ | VM_PROT_WRITE);
+
+ if (paddr == 0)
+ {
+ kmutex_unlock (&hbp->lock);
+ vm_map_unlock_read (task->map);
+ vm_object_deallocate (va.obj);
+ return (KERN_MEMORY_FAILURE);
+ }
+
+ addr = paddr + (addr & (PAGE_SIZE - 1));
+ }
+
+ *(unsigned int *)addr = val;
+ if (task != current_task ())
+ vm_map_remove (kernel_map, addr, addr + sizeof (int));
+ }
vm_map_unlock_read (task->map);
@@ -325,37 +419,35 @@ kern_return_t gsync_wake (task_t task,
ret = KERN_SUCCESS;
}
- simple_unlock (&hbp->lock);
+ kmutex_unlock (&hbp->lock);
return (ret);
}
kern_return_t gsync_requeue (task_t task, vm_offset_t src,
vm_offset_t dst, boolean_t wake_one, int flags)
{
- if (unlikely (task != current_task()))
- /* Not implemented yet. */
- return (KERN_FAILURE);
+ if (task == 0)
+ return (KERN_INVALID_TASK);
+ else if (src % sizeof (int) != 0 || dst % sizeof (int) != 0)
+ return (KERN_INVALID_ADDRESS);
- struct gsync_key src_k, dst_k;
- int src_bkt = gsync_fill_key (task, src, flags, &src_k);
- int dst_bkt = gsync_fill_key (task, dst, flags, &dst_k);
+ union gsync_key src_k, dst_k;
+ struct vm_args va;
- if ((src_bkt | dst_bkt) < 0)
+ int src_bkt = gsync_prepare_key (task, src, flags, &src_k, &va);
+ if (src_bkt < 0)
return (KERN_INVALID_ADDRESS);
- vm_map_lock_read (task->map);
+ /* Unlock the VM object before the second lookup. */
+ vm_object_unlock (va.obj);
- /* We don't actually dereference or modify the contents
- * of the addresses, but we still check that they can
- * be accessed by the task. */
- if (unlikely (!valid_access_p (task->map, src, flags) ||
- !valid_access_p (task->map, dst, flags)))
- {
- vm_map_unlock_read (task->map);
- return (KERN_INVALID_ADDRESS);
- }
+ int dst_bkt = gsync_prepare_key (task, dst, flags, &dst_k, &va);
+ if (dst_bkt < 0)
+ return (KERN_INVALID_ADDRESS);
- vm_map_unlock_read (task->map);
+ /* We never create any temporary mappings in 'requeue', so we
+ * can unlock the VM object right now. */
+ vm_object_unlock (va.obj);
/* If we're asked to unconditionally wake up a waiter, then
* we need to remove a maximum of two threads from the queue. */
@@ -365,23 +457,23 @@ kern_return_t gsync_requeue (task_t task, vm_offset_t src,
/* Acquire the locks in order, to prevent any potential deadlock. */
if (bp1 == bp2)
- simple_lock (&bp1->lock);
+ kmutex_lock (&bp1->lock, FALSE);
else if ((unsigned long)bp1 < (unsigned long)bp2)
{
- simple_lock (&bp1->lock);
- simple_lock (&bp2->lock);
+ kmutex_lock (&bp1->lock, FALSE);
+ kmutex_lock (&bp2->lock, FALSE);
}
else
{
- simple_lock (&bp2->lock);
- simple_lock (&bp1->lock);
+ kmutex_lock (&bp2->lock, FALSE);
+ kmutex_lock (&bp1->lock, FALSE);
}
kern_return_t ret = KERN_SUCCESS;
int exact;
struct list *inp = gsync_find_key (&bp1->entries, &src_k, &exact);
- if (!exact)
+ if (! exact)
/* There are no waiters in the source queue. */
ret = KERN_INVALID_ARGUMENT;
else
@@ -416,9 +508,9 @@ kern_return_t gsync_requeue (task_t task, vm_offset_t src,
}
/* Release the locks and we're done.*/
- simple_unlock (&bp1->lock);
+ kmutex_unlock (&bp1->lock);
if (bp1 != bp2)
- simple_unlock (&bp2->lock);
+ kmutex_unlock (&bp2->lock);
return (ret);
}
diff --git a/kern/host.c b/kern/host.c
index 57280c49..3271b0cd 100644
--- a/kern/host.c
+++ b/kern/host.c
@@ -154,7 +154,7 @@ kern_return_t host_info(
{
host_sched_info_t sched_info;
extern int min_quantum;
- /* minimum quantum, in microseconds */
+ /* minimum quantum, in ticks */
/*
* Return scheduler information.
@@ -165,8 +165,9 @@ kern_return_t host_info(
sched_info = (host_sched_info_t) info;
sched_info->min_timeout = tick / 1000;
- sched_info->min_quantum = min_quantum / 1000;
/* convert microseconds to milliseconds */
+ sched_info->min_quantum = min_quantum * tick / 1000;
+ /* convert ticks to milliseconds */
*count = HOST_SCHED_INFO_COUNT;
return KERN_SUCCESS;
diff --git a/kern/kmutex.c b/kern/kmutex.c
new file mode 100644
index 00000000..5926d1d9
--- /dev/null
+++ b/kern/kmutex.c
@@ -0,0 +1,76 @@
+/* Copyright (C) 2017 Free Software Foundation, Inc.
+ Contributed by Agustina Arzille <avarzille@riseup.net>, 2017.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either
+ version 2 of the license, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <kern/kmutex.h>
+#include <kern/atomic.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+
+void kmutex_init (struct kmutex *mtxp)
+{
+ mtxp->state = KMUTEX_AVAIL;
+ simple_lock_init (&mtxp->lock);
+}
+
+kern_return_t kmutex_lock (struct kmutex *mtxp, boolean_t interruptible)
+{
+ check_simple_locks ();
+
+ if (atomic_cas_acq (&mtxp->state, KMUTEX_AVAIL, KMUTEX_LOCKED))
+ /* Unowned mutex - We're done. */
+ return (KERN_SUCCESS);
+
+ /* The mutex is locked. We may have to sleep. */
+ simple_lock (&mtxp->lock);
+ if (atomic_swap_acq (&mtxp->state, KMUTEX_CONTENDED) == KMUTEX_AVAIL)
+ {
+ /* The mutex was released in-between. */
+ simple_unlock (&mtxp->lock);
+ return (KERN_SUCCESS);
+ }
+
+ /* Sleep and check the result value of the waiting, in order to
+ * inform our caller if we were interrupted or not. Note that
+ * we don't need to set again the mutex state. The owner will
+ * handle that in every case. */
+ thread_sleep ((event_t)mtxp, (simple_lock_t)&mtxp->lock, interruptible);
+ return (current_thread()->wait_result == THREAD_AWAKENED ?
+ KERN_SUCCESS : KERN_INTERRUPTED);
+}
+
+kern_return_t kmutex_trylock (struct kmutex *mtxp)
+{
+ return (atomic_cas_acq (&mtxp->state, KMUTEX_AVAIL, KMUTEX_LOCKED) ?
+ KERN_SUCCESS : KERN_FAILURE);
+}
+
+void kmutex_unlock (struct kmutex *mtxp)
+{
+ if (atomic_cas_rel (&mtxp->state, KMUTEX_LOCKED, KMUTEX_AVAIL))
+ /* No waiters - We're done. */
+ return;
+
+ simple_lock (&mtxp->lock);
+
+ if (!thread_wakeup_one ((event_t)mtxp))
+ /* Any threads that were waiting on this mutex were
+ * interrupted and left - Reset the mutex state. */
+ mtxp->state = KMUTEX_AVAIL;
+
+ simple_unlock (&mtxp->lock);
+}
diff --git a/kern/kmutex.h b/kern/kmutex.h
new file mode 100644
index 00000000..29815156
--- /dev/null
+++ b/kern/kmutex.h
@@ -0,0 +1,52 @@
+/* Copyright (C) 2017 Free Software Foundation, Inc.
+ Contributed by Agustina Arzille <avarzille@riseup.net>, 2017.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either
+ version 2 of the license, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _KERN_KMUTEX_H_
+#define _KERN_KMUTEX_H_ 1
+
+#include <kern/lock.h>
+#include <mach/kern_return.h>
+
+struct kmutex
+{
+ unsigned int state;
+ decl_simple_lock_data (, lock)
+};
+
+/* Possible values for the mutex state. */
+#define KMUTEX_AVAIL 0
+#define KMUTEX_LOCKED 1
+#define KMUTEX_CONTENDED 2
+
+/* Initialize mutex in *MTXP. */
+extern void kmutex_init (struct kmutex *mtxp);
+
+/* Acquire lock MTXP. If INTERRUPTIBLE is true, the sleep may be
+ * prematurely terminated, in which case the function returns
+ * KERN_INTERRUPTED. Otherwise, KERN_SUCCESS is returned. */
+extern kern_return_t kmutex_lock (struct kmutex *mtxp,
+ boolean_t interruptible);
+
+/* Try to acquire the lock MTXP without sleeping.
+ * Returns KERN_SUCCESS if successful, KERN_FAILURE otherwise. */
+extern kern_return_t kmutex_trylock (struct kmutex *mtxp);
+
+/* Unlock the mutex MTXP. */
+extern void kmutex_unlock (struct kmutex *mtxp);
+
+#endif
diff --git a/kern/machine.h b/kern/machine.h
index c67213a2..5c55d2cd 100644
--- a/kern/machine.h
+++ b/kern/machine.h
@@ -54,5 +54,6 @@ extern kern_return_t processor_shutdown (processor_t);
* action_thread() shuts down processors or changes their assignment.
*/
extern void action_thread_continue (void) __attribute__((noreturn));
+extern void action_thread(void) __attribute__((noreturn));
#endif /* _MACHINE_H_ */
diff --git a/kern/profile.c b/kern/profile.c
index 1381b1a5..b33d6953 100644
--- a/kern/profile.c
+++ b/kern/profile.c
@@ -71,7 +71,7 @@ void profile_thread()
int arg[SIZE_PROF_BUFFER+1];
} msg;
- register spl_t s;
+ spl_t s;
buf_to_send_t buf_entry;
queue_entry_t prof_queue_entry;
prof_data_t pbuf;
@@ -113,7 +113,7 @@ void profile_thread()
else {
task_t curr_task;
thread_t curr_th;
- register int *sample;
+ int *sample;
int curr_buf;
int imax;
@@ -183,7 +183,7 @@ void
send_last_sample_buf(th)
thread_t th;
{
- register spl_t s;
+ spl_t s;
buf_to_send_t buf_entry;
vm_offset_t vm_buf_entry;
diff --git a/kern/sched.h b/kern/sched.h
index f82f9f56..588e0aa6 100644
--- a/kern/sched.h
+++ b/kern/sched.h
@@ -47,10 +47,10 @@
#if STAT_TIME
/*
- * Statistical timing uses microseconds as timer units. 18 bit shift
+ * Statistical timing uses microseconds as timer units. 17 bit shift
* yields priorities. PRI_SHIFT_2 isn't needed.
*/
-#define PRI_SHIFT 18
+#define PRI_SHIFT 17
#else /* STAT_TIME */
@@ -60,7 +60,7 @@
#include <machine/sched_param.h>
#endif /* STAT_TIME */
-#define NRQS 50 /* 50 run queues per cpu */
+#define NRQS 64 /* 64 run queues per cpu */
struct run_queue {
queue_head_t runq[NRQS]; /* one for each priority */
@@ -113,6 +113,7 @@ extern queue_head_t action_queue; /* assign/shutdown queue */
decl_simple_lock_data(extern,action_lock);
extern int min_quantum; /* defines max context switch rate */
+#define MIN_QUANTUM (hz / 33) /* context switch 33 times/second */
/*
* Default base priorities for threads.
@@ -165,13 +166,4 @@ MACRO_BEGIN \
(thread)->processor_set->sched_load; \
MACRO_END
-#if SIMPLE_CLOCK
-/*
- * sched_usec is an exponential average of number of microseconds
- * in a second for clock drift compensation.
- */
-
-extern int sched_usec;
-#endif /* SIMPLE_CLOCK */
-
#endif /* _KERN_SCHED_H_ */
diff --git a/kern/sched_prim.c b/kern/sched_prim.c
index bb767352..63a0437c 100644
--- a/kern/sched_prim.c
+++ b/kern/sched_prim.c
@@ -64,10 +64,6 @@ int min_quantum; /* defines max context switch rate */
unsigned sched_tick;
-#if SIMPLE_CLOCK
-int sched_usec;
-#endif /* SIMPLE_CLOCK */
-
thread_t sched_thread_id;
timer_elt_data_t recompute_priorities_timer;
@@ -153,15 +149,12 @@ void sched_init(void)
recompute_priorities_timer.fcn = recompute_priorities;
recompute_priorities_timer.param = NULL;
- min_quantum = hz / 10; /* context switch 10 times/second */
+ min_quantum = MIN_QUANTUM;
wait_queue_init();
pset_sys_bootstrap(); /* initialize processor mgmt. */
queue_init(&action_queue);
simple_lock_init(&action_lock);
sched_tick = 0;
-#if SIMPLE_CLOCK
- sched_usec = 0;
-#endif /* SIMPLE_CLOCK */
ast_init();
}
@@ -231,7 +224,7 @@ void assert_wait(
thread = current_thread();
if (thread->wait_event != 0) {
- panic("assert_wait: already asserted event %#x\n",
+ panic("assert_wait: already asserted event %p\n",
thread->wait_event);
}
s = splsched();
@@ -376,13 +369,14 @@ void clear_wait(
* and thread_wakeup_one.
*
*/
-void thread_wakeup_prim(
+boolean_t thread_wakeup_prim(
event_t event,
boolean_t one_thread,
int result)
{
queue_t q;
int index;
+ boolean_t woke = FALSE;
thread_t thread, next_th;
decl_simple_lock_data( , *lock);
spl_t s;
@@ -435,6 +429,7 @@ void thread_wakeup_prim(
break;
}
thread_unlock(thread);
+ woke = TRUE;
if (one_thread)
break;
}
@@ -442,6 +437,7 @@ void thread_wakeup_prim(
}
simple_unlock(lock);
splx(s);
+ return (woke);
}
/*
@@ -1086,21 +1082,8 @@ void compute_my_priority(
*/
void recompute_priorities(void *param)
{
-#if SIMPLE_CLOCK
- int new_usec;
-#endif /* SIMPLE_CLOCK */
-
sched_tick++; /* age usage one more time */
set_timeout(&recompute_priorities_timer, hz);
-#if SIMPLE_CLOCK
- /*
- * Compensate for clock drift. sched_usec is an
- * exponential average of the number of microseconds in
- * a second. It decays in the same fashion as cpu_usage.
- */
- new_usec = sched_usec_elapsed();
- sched_usec = (5*sched_usec + 3*new_usec)/8;
-#endif /* SIMPLE_CLOCK */
/*
* Wakeup scheduler thread.
*/
@@ -1347,17 +1330,12 @@ void thread_setrun(
/*
* Cause ast on processor if processor is on line.
- *
- * XXX Don't do this remotely to master because this will
- * XXX send an interprocessor interrupt, and that's too
- * XXX expensive for all the unparallelized U*x code.
*/
if (processor == current_processor()) {
ast_on(cpu_number(), AST_BLOCK);
}
- else if ((processor != master_processor) &&
- (processor->state != PROCESSOR_OFF_LINE)) {
- cause_ast_check(processor);
+ else if ((processor->state != PROCESSOR_OFF_LINE)) {
+ cause_ast_check(processor);
}
}
#else /* NCPUS > 1 */
diff --git a/kern/sched_prim.h b/kern/sched_prim.h
index dfb2f54b..405e5456 100644
--- a/kern/sched_prim.h
+++ b/kern/sched_prim.h
@@ -72,7 +72,7 @@ extern void thread_sleep(
simple_lock_t lock,
boolean_t interruptible);
extern void thread_wakeup(void); /* for function pointers */
-extern void thread_wakeup_prim(
+extern boolean_t thread_wakeup_prim(
event_t event,
boolean_t one_thread,
int result);
diff --git a/kern/task.c b/kern/task.c
index 1874af69..735b9e59 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -73,7 +73,7 @@ void task_init(void)
* Task_create must assign to kernel_task as a side effect,
* for other initialization. (:-()
*/
- (void) task_create(TASK_NULL, FALSE, &kernel_task);
+ (void) task_create_kernel(TASK_NULL, FALSE, &kernel_task);
(void) task_set_name(kernel_task, "gnumach");
vm_map_set_name(kernel_map, kernel_task->name);
}
@@ -83,6 +83,19 @@ kern_return_t task_create(
boolean_t inherit_memory,
task_t *child_task) /* OUT */
{
+ if (parent_task == TASK_NULL)
+ return KERN_INVALID_TASK;
+
+ return task_create_kernel (parent_task, inherit_memory,
+ child_task);
+}
+
+kern_return_t
+task_create_kernel(
+ task_t parent_task,
+ boolean_t inherit_memory,
+ task_t *child_task) /* OUT */
+{
task_t new_task;
processor_set_t pset;
#if FAST_TAS
@@ -189,14 +202,16 @@ kern_return_t task_create(
new_task);
else
snprintf (new_task->name, sizeof new_task->name, "(%.*s)",
- sizeof new_task->name - 3, parent_task->name);
+ (int) (sizeof new_task->name - 3), parent_task->name);
if (new_task_notification != NULL) {
task_reference (new_task);
task_reference (parent_task);
mach_notify_new_task (new_task_notification,
convert_task_to_port (new_task),
- convert_task_to_port (parent_task));
+ parent_task
+ ? convert_task_to_port (parent_task)
+ : IP_NULL);
}
ipc_task_enable(new_task);
@@ -1209,7 +1224,8 @@ void consider_task_collect(void)
task_collect_max_rate = hz;
if (task_collect_allowed &&
- (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
+ (sched_tick > (task_collect_last_tick +
+ task_collect_max_rate / (hz / 1)))) {
task_collect_last_tick = sched_tick;
task_collect_scan();
}
diff --git a/kern/task.h b/kern/task.h
index 2a4c28fc..0b746aff 100644
--- a/kern/task.h
+++ b/kern/task.h
@@ -140,6 +140,10 @@ extern kern_return_t task_create(
task_t parent_task,
boolean_t inherit_memory,
task_t *child_task);
+extern kern_return_t task_create_kernel(
+ task_t parent_task,
+ boolean_t inherit_memory,
+ task_t *child_task);
extern kern_return_t task_terminate(
task_t task);
extern kern_return_t task_suspend(
diff --git a/kern/thread.c b/kern/thread.c
index 0ac7c535..680e72c2 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -444,6 +444,17 @@ kern_return_t thread_create(
task_unlock(parent_task);
/*
+ * This thread will mosty probably start working, assume it
+ * will take its share of CPU, to avoid having to find it out
+ * slowly. Decaying will however fix that quickly if it actually
+ * does not work
+ */
+ new_thread->cpu_usage = TIMER_RATE * SCHED_SCALE /
+ (pset->load_average >= SCHED_SCALE ?
+ pset->load_average : SCHED_SCALE);
+ new_thread->sched_usage = TIMER_RATE * SCHED_SCALE;
+
+ /*
* Lock both the processor set and the task,
* so that the thread can be added to both
* simultaneously. Processor set must be
@@ -1527,13 +1538,6 @@ kern_return_t thread_info(
basic_info->cpu_usage = thread->cpu_usage /
(TIMER_RATE/TH_USAGE_SCALE);
basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
-#if SIMPLE_CLOCK
- /*
- * Clock drift compensation.
- */
- basic_info->cpu_usage =
- (basic_info->cpu_usage * 1000000)/sched_usec;
-#endif /* SIMPLE_CLOCK */
flags = 0;
if (thread->state & TH_SWAPPED)
@@ -1576,8 +1580,11 @@ kern_return_t thread_info(
else if (flavor == THREAD_SCHED_INFO) {
thread_sched_info_t sched_info;
- if (*thread_info_count < THREAD_SCHED_INFO_COUNT) {
- return KERN_INVALID_ARGUMENT;
+ /* Allow *thread_info_count to be one smaller than the
+ usual amount, because last_processor is a
+ new member that some callers might not know about. */
+ if (*thread_info_count < THREAD_SCHED_INFO_COUNT -1) {
+ return KERN_INVALID_ARGUMENT;
}
sched_info = (thread_sched_info_t) thread_info_out;
@@ -1605,6 +1612,12 @@ kern_return_t thread_info(
sched_info->depressed = (thread->depress_priority >= 0);
sched_info->depress_priority = thread->depress_priority;
+#if NCPUS > 1
+ sched_info->last_processor = thread->last_processor;
+#else
+ sched_info->last_processor = 0;
+#endif
+
thread_unlock(thread);
splx(s);
@@ -2257,7 +2270,7 @@ thread_wire(
void thread_collect_scan(void)
{
- register thread_t thread, prev_thread;
+ thread_t thread, prev_thread;
processor_set_t pset, prev_pset;
prev_thread = THREAD_NULL;
@@ -2333,7 +2346,8 @@ void consider_thread_collect(void)
if (thread_collect_allowed &&
(sched_tick >
- (thread_collect_last_tick + thread_collect_max_rate))) {
+ (thread_collect_last_tick +
+ thread_collect_max_rate / (hz / 1)))) {
thread_collect_last_tick = sched_tick;
thread_collect_scan();
}
diff --git a/linux/Makefrag.am b/linux/Makefrag.am
index 1b690108..38718a3f 100644
--- a/linux/Makefrag.am
+++ b/linux/Makefrag.am
@@ -43,7 +43,7 @@ liblinux_a_CFLAGS += \
# TODO. Do we really need `-traditional'?
liblinux_a_CCASFLAGS = $(AM_CCASFLAGS) \
- -D__ASSEMBLY__ -traditional \
+ -traditional \
$(liblinux_a_CPPFLAGS)
liblinux_a_SOURCES = \
diff --git a/linux/configfrag.ac b/linux/configfrag.ac
index 882af6bc..78b59d7f 100644
--- a/linux/configfrag.ac
+++ b/linux/configfrag.ac
@@ -162,7 +162,8 @@ AC_Linux_DRIVER_nodef([53c78xx],
[SCSI controller NCR 53C7,8xx],
[CONFIG_SCSI_NCR53C7xx],
[scsi])
-AC_Linux_DRIVER([AM53C974],
+# Disabled by default.
+AC_Linux_DRIVER_nodef([AM53C974],
[SCSI controller AM53/79C974 (am53c974, am79c974)],
[CONFIG_SCSI_AM53C974],
[scsi])
@@ -189,7 +190,8 @@ AC_Linux_DRIVER([aha152x],
[SCSI controller Adaptec AHA-152x/2825 (aha152x, aha2825)],
[CONFIG_SCSI_AHA152X],
[scsi])
-AC_Linux_DRIVER([aha1542],
+# Disabled by default.
+AC_Linux_DRIVER_nodef([aha1542],
[SCSI controller Adaptec AHA-1542],
[CONFIG_SCSI_AHA1542],
[scsi])
@@ -206,7 +208,8 @@ AC_Linux_DRIVER([dtc],
[SCSI controller DTC3180/3280 (dtc3180, dtc3280)],
[CONFIG_SCSI_DTC3280],
[scsi])
-AC_Linux_DRIVER([eata],
+# Disabled by default.
+AC_Linux_DRIVER_nodef([eata],
[SCSI controller EATA ISA/EISA/PCI
(DPT and generic EATA/DMA-compliant boards)],
[CONFIG_SCSI_EATA],
@@ -216,7 +219,8 @@ AC_Linux_DRIVER_nodef([eata_dma],
[SCSI controller EATA-DMA (DPT, NEC, AT&T, SNI, AST, Olivetti, Alphatronix)],
[CONFIG_SCSI_EATA_DMA],
[scsi])
-AC_Linux_DRIVER([eata_pio],
+# Disabled by default.
+AC_Linux_DRIVER_nodef([eata_pio],
[SCSI controller EATA-PIO (old DPT PM2001, PM2012A)],
[CONFIG_SCSI_EATA_PIO],
[scsi])
@@ -246,7 +250,8 @@ AC_Linux_DRIVER([pas16],
[SCSI controller PAS16],
[CONFIG_SCSI_PASS16],
[scsi])
-AC_Linux_DRIVER([ppa],
+# Disabled by default.
+AC_Linux_DRIVER_nodef([ppa],
[IOMEGA Parallel Port ZIP drive],
[CONFIG_SCSI_PPA],
[scsi])
@@ -282,7 +287,8 @@ AC_Linux_DRIVER([ultrastor],
[SCSI controller UltraStor],
[CONFIG_SCSI_ULTRASTOR],
[scsi])
-AC_Linux_DRIVER([wd7000],
+# Disabled by default.
+AC_Linux_DRIVER_nodef([wd7000],
[SCSI controller WD 7000],
[CONFIG_SCSI_7000FASST],
[scsi])
@@ -316,7 +322,8 @@ AC_Linux_DRIVER([3c59x],
"Vortex/Boomerang"],
[CONFIG_VORTEX],
[net])
-AC_Linux_DRIVER([3c515],
+# Disabled by default.
+AC_Linux_DRIVER_nodef([3c515],
[Ethernet controller 3Com 515 ISA Fast EtherLink],
[CONFIG_3C515],
[net])
@@ -344,11 +351,13 @@ AC_Linux_DRIVER([de4x5],
[Ethernet controller DE4x5 (de4x5, de425, de434, de435, de450, de500)],
[CONFIG_DE4X5],
[net])
-AC_Linux_DRIVER([de600],
+# Disabled by default.
+AC_Linux_DRIVER_nodef([de600],
[Ethernet controller D-Link DE-600],
[CONFIG_DE600],
[net])
-AC_Linux_DRIVER([de620],
+# Disabled by default.
+AC_Linux_DRIVER_nodef([de620],
[Ethernet controller D-Link DE-620],
[CONFIG_DE620],
[net])
diff --git a/linux/dev/arch/i386/kernel/irq.c b/linux/dev/arch/i386/kernel/irq.c
index b7dfa1a9..36af3fe3 100644
--- a/linux/dev/arch/i386/kernel/irq.c
+++ b/linux/dev/arch/i386/kernel/irq.c
@@ -63,13 +63,6 @@ unsigned int local_irq_count[NR_CPUS];
int EISA_bus = 0;
/*
- * Priority at which a Linux handler should be called.
- * This is used at the time of an IRQ allocation. It is
- * set by emulation routines for each class of device.
- */
-spl_t linux_intr_pri;
-
-/*
* Flag indicating an interrupt is being handled.
*/
unsigned int intr_count = 0;
@@ -129,7 +122,7 @@ linux_intr (int irq)
mark_intr_removed (irq, action->delivery_port);
ipc_port_release (action->delivery_port);
*prev = action->next;
- printk ("irq handler %d: release an dead delivery port\n", irq);
+ printk ("irq handler %d: release a dead delivery port\n", irq);
linux_kfree(action);
action = *prev;
continue;
@@ -160,18 +153,15 @@ linux_intr (int irq)
static inline void
mask_irq (unsigned int irq_nr)
{
- int i;
+ int new_pic_mask = curr_pic_mask | 1 << irq_nr;
- for (i = 0; i < intpri[irq_nr]; i++)
- pic_mask[i] |= 1 << irq_nr;
-
- if (curr_pic_mask != pic_mask[curr_ipl])
+ if (curr_pic_mask != new_pic_mask)
{
- curr_pic_mask = pic_mask[curr_ipl];
+ curr_pic_mask = new_pic_mask;
if (irq_nr < 8)
- outb (curr_pic_mask & 0xff, PIC_MASTER_OCW);
+ outb (curr_pic_mask & 0xff, PIC_MASTER_OCW);
else
- outb (curr_pic_mask >> 8, PIC_SLAVE_OCW);
+ outb (curr_pic_mask >> 8, PIC_SLAVE_OCW);
}
}
@@ -181,18 +171,18 @@ mask_irq (unsigned int irq_nr)
static inline void
unmask_irq (unsigned int irq_nr)
{
- int mask, i;
+ int mask;
+ int new_pic_mask;
mask = 1 << irq_nr;
if (irq_nr >= 8)
mask |= 1 << 2;
- for (i = 0; i < intpri[irq_nr]; i++)
- pic_mask[i] &= ~mask;
+ new_pic_mask = curr_pic_mask & ~mask;
- if (curr_pic_mask != pic_mask[curr_ipl])
+ if (curr_pic_mask != new_pic_mask)
{
- curr_pic_mask = pic_mask[curr_ipl];
+ curr_pic_mask = new_pic_mask;
if (irq_nr < 8)
outb (curr_pic_mask & 0xff, PIC_MASTER_OCW);
else
@@ -200,8 +190,13 @@ unmask_irq (unsigned int irq_nr)
}
}
+/* Count how many subsystems requested to disable each IRQ */
+static unsigned ndisabled_irq[NR_IRQS];
+
+/* These disable/enable IRQs for real after counting how many subsystems
+ * requested that */
void
-disable_irq (unsigned int irq_nr)
+__disable_irq (unsigned int irq_nr)
{
unsigned long flags;
@@ -209,12 +204,15 @@ disable_irq (unsigned int irq_nr)
save_flags (flags);
cli ();
- mask_irq (irq_nr);
+  ndisabled_irq[irq_nr]++;
+  assert (ndisabled_irq[irq_nr] > 0);
+  if (ndisabled_irq[irq_nr] == 1)
+ mask_irq (irq_nr);
restore_flags (flags);
}
void
-enable_irq (unsigned int irq_nr)
+__enable_irq (unsigned int irq_nr)
{
unsigned long flags;
@@ -222,17 +220,47 @@ enable_irq (unsigned int irq_nr)
save_flags (flags);
cli ();
- unmask_irq (irq_nr);
+  assert (ndisabled_irq[irq_nr] > 0);
+  ndisabled_irq[irq_nr]--;
+  if (ndisabled_irq[irq_nr] == 0)
+ unmask_irq (irq_nr);
+ restore_flags (flags);
+}
+
+/* IRQ mask according to Linux drivers */
+static unsigned linux_pic_mask;
+
+/* These only record that Linux requested to mask IRQs */
+void
+disable_irq (unsigned int irq_nr)
+{
+ unsigned long flags;
+ unsigned mask = 1U << irq_nr;
+
+ save_flags (flags);
+ cli ();
+ if (!(linux_pic_mask & mask))
+ {
+ linux_pic_mask |= mask;
+ __disable_irq(irq_nr);
+ }
restore_flags (flags);
}
-/*
- * Default interrupt handler for Linux.
- */
void
-linux_bad_intr (int irq)
+enable_irq (unsigned int irq_nr)
{
- mask_irq (irq);
+ unsigned long flags;
+ unsigned mask = 1U << irq_nr;
+
+ save_flags (flags);
+ cli ();
+ if (linux_pic_mask & mask)
+ {
+ linux_pic_mask &= ~mask;
+ __enable_irq(irq_nr);
+ }
+ restore_flags (flags);
}
static int
@@ -253,10 +281,6 @@ setup_x86_irq (int irq, struct linux_action *new)
if ((old->flags ^ new->flags) & SA_INTERRUPT)
return (-EBUSY);
- /* Can't share at different levels */
- if (intpri[irq] && linux_intr_pri != intpri[irq])
- return (-EBUSY);
-
/* add new interrupt at end of irq queue */
do
{
@@ -276,7 +300,6 @@ setup_x86_irq (int irq, struct linux_action *new)
{
ivect[irq] = linux_intr;
iunit[irq] = irq;
- intpri[irq] = linux_intr_pri;
unmask_irq (irq);
}
restore_flags (flags);
@@ -388,9 +411,8 @@ free_irq (unsigned int irq, void *dev_id)
if (!irq_action[irq])
{
mask_irq (irq);
- ivect[irq] = linux_bad_intr;
+ ivect[irq] = intnull;
iunit[irq] = irq;
- intpri[irq] = SPL0;
}
restore_flags (flags);
linux_kfree (action);
@@ -416,9 +438,8 @@ probe_irq_on (void)
*/
for (i = 15; i > 0; i--)
{
- if (!irq_action[i] && ivect[i] == linux_bad_intr)
+ if (!irq_action[i] && ivect[i] == intnull)
{
- intpri[i] = linux_intr_pri;
enable_irq (i);
irqs |= 1 << i;
}
@@ -450,10 +471,9 @@ probe_irq_off (unsigned long irqs)
*/
for (i = 15; i > 0; i--)
{
- if (!irq_action[i] && ivect[i] == linux_bad_intr)
+ if (!irq_action[i] && ivect[i] == intnull)
{
disable_irq (i);
- intpri[i] = SPL0;
}
}
@@ -472,6 +492,18 @@ probe_irq_off (unsigned long irqs)
* Reserve IRQs used by Mach drivers.
* Must be called before Linux IRQ detection, after Mach IRQ detection.
*/
+
+static void reserved_mach_handler (int line, void *cookie, struct pt_regs *regs)
+{
+ /* These interrupts are actually handled in Mach. */
+ assert (! "reached");
+}
+
+static const struct linux_action reserved_mach =
+ {
+ reserved_mach_handler, NULL, NULL, 0
+ };
+
static void
reserve_mach_irqs (void)
{
@@ -479,9 +511,11 @@ reserve_mach_irqs (void)
for (i = 0; i < 16; i++)
{
- if (ivect[i] != prtnull && ivect[i] != intnull)
- /* Set non-NULL value. */
- irq_action[i] = (struct linux_action *) -1;
+ if (ivect[i] != intnull)
+ /* This dummy action does not specify SA_SHIRQ, so
+ setup_x86_irq will not try to add a handler to this
+ slot. Therefore, the cast is safe. */
+ irq_action[i] = (struct linux_action *) &reserved_mach;
}
}
@@ -756,12 +790,10 @@ void __global_restore_flags(unsigned long flags)
#endif
static void (*old_clock_handler) ();
-static int old_clock_pri;
void
init_IRQ (void)
{
- int i;
char *p;
int latch = (CLKNUM + hz / 2) / hz;
@@ -781,29 +813,10 @@ init_IRQ (void)
* Install our clock interrupt handler.
*/
old_clock_handler = ivect[0];
- old_clock_pri = intpri[0];
ivect[0] = linux_timer_intr;
- intpri[0] = SPLHI;
reserve_mach_irqs ();
- for (i = 1; i < 16; i++)
- {
- /*
- * irq2 and irq13 should be igonored.
- */
- if (i == 2 || i == 13)
- continue;
- if (ivect[i] == prtnull || ivect[i] == intnull)
- {
- ivect[i] = linux_bad_intr;
- iunit[i] = i;
- intpri[i] = SPL0;
- }
- }
-
- form_pic_mask ();
-
/*
* Enable interrupts.
*/
@@ -841,7 +854,5 @@ restore_IRQ (void)
* Restore clock interrupt handler.
*/
ivect[0] = old_clock_handler;
- intpri[0] = old_clock_pri;
- form_pic_mask ();
}
diff --git a/linux/dev/drivers/block/ahci.c b/linux/dev/drivers/block/ahci.c
index b60f1a19..6d5c2858 100644
--- a/linux/dev/drivers/block/ahci.c
+++ b/linux/dev/drivers/block/ahci.c
@@ -702,9 +702,9 @@ static int ahci_identify(const volatile struct ahci_host *ahci_host, const volat
}
}
if (port->capacity/2048 >= 10240)
- printk("sd%u: %s, %uGB w/%dkB Cache\n", port - ports, id.model, (unsigned) (port->capacity/(2048*1024)), id.buf_size/2);
+ printk("sd%u: %s, %uGB w/%dkB Cache\n", (unsigned) (port - ports), id.model, (unsigned) (port->capacity/(2048*1024)), id.buf_size/2);
else
- printk("sd%u: %s, %uMB w/%dkB Cache\n", port - ports, id.model, (unsigned) (port->capacity/2048), id.buf_size/2);
+ printk("sd%u: %s, %uMB w/%dkB Cache\n", (unsigned) (port - ports), id.model, (unsigned) (port->capacity/2048), id.buf_size/2);
}
port->identify = 0;
@@ -755,7 +755,7 @@ static void ahci_probe_port(const volatile struct ahci_host *ahci_host, const vo
timeout = jiffies + WAIT_MAX;
while (readl(&ahci_port->cmd) & PORT_CMD_LIST_ON)
if (jiffies > timeout) {
- printk("sd%u: timeout waiting for list completion\n", port-ports);
+ printk("sd%u: timeout waiting for list completion\n", (unsigned) (port-ports));
port->ahci_host = NULL;
port->ahci_port = NULL;
return;
@@ -765,7 +765,7 @@ static void ahci_probe_port(const volatile struct ahci_host *ahci_host, const vo
timeout = jiffies + WAIT_MAX;
while (readl(&ahci_port->cmd) & PORT_CMD_FIS_ON)
if (jiffies > timeout) {
- printk("sd%u: timeout waiting for FIS completion\n", port-ports);
+ printk("sd%u: timeout waiting for FIS completion\n", (unsigned) (port-ports));
port->ahci_host = NULL;
port->ahci_port = NULL;
return;
@@ -796,7 +796,7 @@ static void ahci_probe_port(const volatile struct ahci_host *ahci_host, const vo
timeout = jiffies + WAIT_MAX;
while (readl(&ahci_port->cmd) & PORT_CMD_LIST_ON)
if (jiffies > timeout) {
- printk("sd%u: timeout waiting for list completion\n", port-ports);
+ printk("sd%u: timeout waiting for list completion\n", (unsigned) (port-ports));
port->ahci_host = NULL;
port->ahci_port = NULL;
return;
@@ -829,6 +829,8 @@ static void ahci_probe_dev(unsigned char bus, unsigned char device)
printk("ahci: %02x:%02x.%x: Can not read configuration", bus, dev, fun);
return;
}
+ /* Ignore multifunction bit */
+ hdrtype &= ~0x80;
if (hdrtype != 0) {
printk("ahci: %02x:%02x.%x: Unknown hdrtype %d\n", bus, dev, fun, hdrtype);
diff --git a/linux/dev/drivers/block/genhd.c b/linux/dev/drivers/block/genhd.c
index 4a36f7ff..ee1ac32b 100644
--- a/linux/dev/drivers/block/genhd.c
+++ b/linux/dev/drivers/block/genhd.c
@@ -774,9 +774,6 @@ void device_setup(void)
char *c, *param, *white;
struct gendisk *p;
int nr=0;
-#ifdef MACH
- linux_intr_pri = SPL6;
-#endif
for (c = kernel_cmdline; c; )
{
@@ -809,9 +806,6 @@ void device_setup(void)
scsi_dev_init();
#endif
#ifdef CONFIG_INET
-#ifdef MACH
- linux_intr_pri = SPL6;
-#endif
extern char *kernel_cmdline;
if (!strstr(kernel_cmdline, " nonetdev"))
net_dev_init();
diff --git a/linux/dev/glue/block.c b/linux/dev/glue/block.c
index c1d922b6..a8cb9b3f 100644
--- a/linux/dev/glue/block.c
+++ b/linux/dev/glue/block.c
@@ -578,6 +578,7 @@ rdwr_full (int rw, kdev_t dev, loff_t *off, char **buf, int *resid, int bshift)
int cc, err = 0, i, j, nb, nbuf;
long blk;
struct buffer_head bhead[MAX_BUF], *bh, *bhp[MAX_BUF];
+ phys_addr_t pa;
assert ((*off & BMASK) == 0);
@@ -592,7 +593,10 @@ rdwr_full (int rw, kdev_t dev, loff_t *off, char **buf, int *resid, int bshift)
if (rw == WRITE)
set_bit (BH_Dirty, &bh->b_state);
cc = PAGE_SIZE - (((int) *buf + (nb << bshift)) & PAGE_MASK);
- if (cc >= BSIZE && (((int) *buf + (nb << bshift)) & 511) == 0)
+ pa = pmap_extract (vm_map_pmap (device_io_map),
+ (((vm_offset_t) *buf) + (nb << bshift)));
+ if (cc >= BSIZE && (((int) *buf + (nb << bshift)) & 511) == 0
+ && pa + cc <= VM_PAGE_DIRECTMAP_LIMIT)
cc &= ~BMASK;
else
{
@@ -602,9 +606,7 @@ rdwr_full (int rw, kdev_t dev, loff_t *off, char **buf, int *resid, int bshift)
if (cc > ((nbuf - nb) << bshift))
cc = (nbuf - nb) << bshift;
if (! test_bit (BH_Bounce, &bh->b_state))
- bh->b_data = (char *) phystokv(pmap_extract (vm_map_pmap (device_io_map),
- (((vm_offset_t) *buf)
- + (nb << bshift))));
+ bh->b_data = (char *) phystokv(pa);
else
{
bh->b_data = alloc_buffer (cc);
@@ -941,7 +943,6 @@ init_partition (struct name_map *np, kdev_t *dev,
if (gd->part[MINOR (d->inode.i_rdev)].nr_sects <= 0
|| gd->part[MINOR (d->inode.i_rdev)].start_sect < 0)
continue;
- linux_intr_pri = SPL6;
d->file.f_flags = 0;
d->file.f_mode = O_RDONLY;
if (ds->fops->open && (*ds->fops->open) (&d->inode, &d->file))
@@ -1087,7 +1088,6 @@ device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
if (ds->fops->open)
{
td.inode.i_rdev = dev;
- linux_intr_pri = SPL6;
err = (*ds->fops->open) (&td.inode, &td.file);
if (err)
{
@@ -1303,7 +1303,7 @@ device_write (void *d, ipc_port_t reply_port,
int resid, amt, i;
int count = (int) orig_count;
io_return_t err = 0;
- vm_map_copy_t copy;
+ vm_map_copy_t copy = (vm_map_copy_t) data;
vm_offset_t addr, uaddr;
vm_size_t len, size;
struct block_data *bd = d;
@@ -1327,7 +1327,6 @@ device_write (void *d, ipc_port_t reply_port,
}
resid = count;
- copy = (vm_map_copy_t) data;
uaddr = copy->offset;
/* Allocate a kernel buffer. */
diff --git a/linux/dev/glue/glue.h b/linux/dev/glue/glue.h
index 8cb118cc..e94ff556 100644
--- a/linux/dev/glue/glue.h
+++ b/linux/dev/glue/glue.h
@@ -23,7 +23,6 @@
#include <mach/machine/vm_types.h>
extern int linux_auto_config;
-extern int linux_intr_pri;
extern unsigned long alloc_contig_mem (unsigned, unsigned, unsigned, vm_page_t *);
extern void free_contig_mem (vm_page_t, unsigned);
@@ -33,7 +32,6 @@ extern void linux_kmem_init (void);
extern void linux_net_emulation_init (void);
extern void device_setup (void);
extern void linux_timer_intr (void);
-extern void linux_bad_intr (int);
extern void linux_sched_init (void);
extern void pcmcia_init (void);
extern void linux_soft_intr (void);
diff --git a/linux/dev/glue/net.c b/linux/dev/glue/net.c
index 6b9cadd5..dd80622c 100644
--- a/linux/dev/glue/net.c
+++ b/linux/dev/glue/net.c
@@ -380,7 +380,6 @@ device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
if (dev->open)
{
- linux_intr_pri = SPL6;
if ((*dev->open) (dev))
err = D_NO_SUCH_DEVICE;
}
@@ -427,8 +426,7 @@ device_write (void *d, ipc_port_t reply_port,
recnum_t bn, io_buf_ptr_t data, unsigned int count,
int *bytes_written)
{
- unsigned char *p;
- int i, s;
+ int s;
vm_map_copy_t copy = (vm_map_copy_t) data;
char *map_data;
vm_offset_t map_addr;
diff --git a/linux/dev/include/asm-i386/system.h b/linux/dev/include/asm-i386/system.h
index f26a33e7..41eb65a4 100644
--- a/linux/dev/include/asm-i386/system.h
+++ b/linux/dev/include/asm-i386/system.h
@@ -1,6 +1,8 @@
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H
+#include <i386/ipl.h> /* curr_ipl, splx */
+
#include <asm/segment.h>
/*
@@ -223,10 +225,8 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
#define mb() __asm__ __volatile__ ("" : : :"memory")
#define __sti() __asm__ __volatile__ ("sti": : :"memory")
#define __cli() __asm__ __volatile__ ("cli": : :"memory")
-#define __save_flags(x) \
-__asm__ __volatile__("pushf ; pop %0" : "=r" (x): /* no input */ :"memory")
-#define __restore_flags(x) \
-__asm__ __volatile__("push %0 ; popf": /* no output */ :"g" (x):"memory")
+#define __save_flags(x) (x = ((curr_ipl > 0) ? 0 : (1 << 9)))
+#define __restore_flags(x) splx((x & (1 << 9)) ? 0 : 7)
#ifdef __SMP__
diff --git a/linux/dev/init/main.c b/linux/dev/init/main.c
index 3740c12c..6d853957 100644
--- a/linux/dev/init/main.c
+++ b/linux/dev/init/main.c
@@ -151,7 +151,6 @@ linux_init (void)
linux_net_emulation_init ();
#endif
- cli ();
device_setup ();
#ifdef CONFIG_PCMCIA
diff --git a/linux/dev/kernel/sched.c b/linux/dev/kernel/sched.c
index 87906a45..2a9eeb3f 100644
--- a/linux/dev/kernel/sched.c
+++ b/linux/dev/kernel/sched.c
@@ -622,6 +622,6 @@ linux_timer_intr (void)
mark_bh (TQUEUE_BH);
#if 0
if (linux_timer_print)
- printf ("linux_timer_intr: pic_mask[0] %x\n", pic_mask[0]);
+ printf ("linux_timer_intr: hello\n");
#endif
}
diff --git a/linux/pcmcia-cs/modules/ds.c b/linux/pcmcia-cs/modules/ds.c
index e4579eaf..f2f33417 100644
--- a/linux/pcmcia-cs/modules/ds.c
+++ b/linux/pcmcia-cs/modules/ds.c
@@ -940,7 +940,7 @@ int __init init_pcmcia_ds(void)
return -EINVAL;
}
if (serv.Count == 0) {
- printk(KERN_NOTICE "ds: no socket drivers loaded!\n");
+ printk(KERN_NOTICE "ds: no socket drivers\n");
return -1;
}
diff --git a/linux/src/drivers/block/ide.c b/linux/src/drivers/block/ide.c
index dc20fcba..170e4e13 100644
--- a/linux/src/drivers/block/ide.c
+++ b/linux/src/drivers/block/ide.c
@@ -395,6 +395,7 @@ static void init_hwif_data (unsigned int index)
/* fill in any non-zero initial values */
hwif->index = index;
hwif->io_base = default_io_base[index];
+ hwif->irq = default_irqs[index];
hwif->ctl_port = hwif->io_base ? hwif->io_base+0x206 : 0x000;
#ifdef CONFIG_BLK_DEV_HD
if (hwif->io_base == HD_DATA)
@@ -2702,6 +2703,8 @@ static int try_to_identify (ide_drive_t *drive, byte cmd)
int irq_off;
if (!HWIF(drive)->irq) { /* already got an IRQ? */
+ printk("%s: Not probing legacy IRQs\n", drive->name);
+ return 2;
probe_irq_off(probe_irq_on()); /* clear dangling irqs */
irqs_on = probe_irq_on(); /* start monitoring irqs */
OUT_BYTE(drive->ctl,IDE_CONTROL_REG); /* enable device irq */
diff --git a/linux/src/drivers/block/triton.c b/linux/src/drivers/block/triton.c
index 37eff2b3..f4633d21 100644
--- a/linux/src/drivers/block/triton.c
+++ b/linux/src/drivers/block/triton.c
@@ -226,6 +226,14 @@ static int config_drive_for_dma (ide_drive_t *drive)
return 1; /* DMA disabled */
}
}
+
+ if (!strcmp("QEMU HARDDISK", id->model)) {
+ /* Virtual disks don't have issues with DMA :) */
+ drive->using_dma = 1;
+ /* And keep enabled even if some requests time out due to emulation lag. */
+ drive->keep_settings = 1;
+ return 1; /* DMA enabled */
+ }
/* Enable DMA on any drive that has mode 4 or 2 UltraDMA enabled */
if (id->field_valid & 4) { /* UltraDMA */
/* Enable DMA on any drive that has mode 4 UltraDMA enabled */
diff --git a/linux/src/drivers/scsi/NCR53c406a.c b/linux/src/drivers/scsi/NCR53c406a.c
index 9f2de9a0..7745f5ad 100644
--- a/linux/src/drivers/scsi/NCR53c406a.c
+++ b/linux/src/drivers/scsi/NCR53c406a.c
@@ -525,7 +525,7 @@ NCR53c406a_detect(Scsi_Host_Template * tpnt){
#ifndef IRQ_LEV
if (irq_level < 0) { /* LILO override if >= 0*/
- irq_level=irq_probe();
+ irq_level = -1; // XXX No probing irq_probe();
if (irq_level < 0) { /* Trouble */
printk("NCR53c406a: IRQ problem, irq_level=%d, giving up\n", irq_level);
return 0;
diff --git a/linux/src/include/linux/compiler-gcc.h b/linux/src/include/linux/compiler-gcc.h
index b1a0be0c..d9426dfc 100644
--- a/linux/src/include/linux/compiler-gcc.h
+++ b/linux/src/include/linux/compiler-gcc.h
@@ -93,7 +93,11 @@
#define __gcc_header(x) #x
#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
#define gcc_header(x) _gcc_header(x)
+#if __GNUC__ < 5
#include gcc_header(__GNUC__)
+#else
+#include gcc_header(5)
+#endif
#if !defined(__noclone)
#define __noclone /* not needed */
diff --git a/linux/src/include/linux/compiler-gcc6.h b/linux/src/include/linux/compiler-gcc6.h
deleted file mode 100644
index cc2e86a9..00000000
--- a/linux/src/include/linux/compiler-gcc6.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc6.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#define __used __attribute__((__used__))
-#define __must_check __attribute__((warn_unused_result))
-#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
-
-/* Mark functions as cold. gcc will assume any path leading to a call
- to them will be unlikely. This means a lot of manual unlikely()s
- are unnecessary now for any paths leading to the usual suspects
- like BUG(), printk(), panic() etc. [but let's keep them for now for
- older compilers]
-
- Early snapshots of gcc 4.3 don't support this and we can't detect this
- in the preprocessor, but we can live with this because they're unreleased.
- Maketime probing would be overkill here.
-
- gcc also has a __attribute__((__hot__)) to move hot functions into
- a special section, but I don't see any sense in this right now in
- the kernel context */
-#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
- */
-#define unreachable() __builtin_unreachable()
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
-
-/*
- * Tell the optimizer that something else uses this function or variable.
- */
-#define __visible __attribute__((externally_visible))
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#define __HAVE_BUILTIN_BSWAP16__
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
-
-#define KASAN_ABI_VERSION 4
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 99381efd..4d1d90a4 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -651,7 +651,7 @@ vm_fault_return_t vm_fault_page(
object->pager_request,
m->offset + object->paging_offset,
PAGE_SIZE, access_required)) != KERN_SUCCESS) {
- if (rc != MACH_SEND_INTERRUPTED)
+ if (object->pager && rc != MACH_SEND_INTERRUPTED)
printf("%s(0x%p, 0x%p, 0x%lx, 0x%x, 0x%x) failed, %x\n",
"memory_object_data_request",
object->pager,
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 855d7997..ffc8934b 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -102,7 +102,7 @@ MACRO_END
* are now "top level" maps (either task map, kernel map or submap
* of the kernel map).
*
- * Since portions of maps are specified by start/end addreses,
+ * Since portions of maps are specified by start/end addresses,
* which may not align with existing map entries, all
* routines merely "clip" entries to these start/end values.
* [That is, an entry is split into two, bordering at a
@@ -216,7 +216,7 @@ vm_map_t vm_map_create(
result = (vm_map_t) kmem_cache_alloc(&vm_map_cache);
if (result == VM_MAP_NULL)
- panic("vm_map_create");
+ return VM_MAP_NULL;
vm_map_setup(result, pmap, min, max);
@@ -685,7 +685,7 @@ restart:
start = (map->min_offset + mask) & ~mask;
end = start + size;
- if ((end <= start) || (end > map->max_offset)) {
+ if ((start < map->min_offset) || (end <= start) || (end > map->max_offset)) {
goto error;
}
@@ -699,7 +699,8 @@ restart:
start = (entry->vme_end + mask) & ~mask;
end = start + size;
- if ((end > start)
+ if ((start >= entry->vme_end)
+ && (end > start)
&& (end <= map->max_offset)
&& (end <= (entry->vme_end + entry->gap_size))) {
*startp = start;
@@ -738,6 +739,7 @@ restart:
assert(entry->gap_size >= max_size);
start = (entry->vme_end + mask) & ~mask;
+ assert(start >= entry->vme_end);
end = start + size;
assert(end > start);
assert(end <= (entry->vme_end + entry->gap_size));
@@ -2793,7 +2795,7 @@ kern_return_t vm_map_copyout(
m = vm_page_lookup(object, offset);
if (m == VM_PAGE_NULL || m->wire_count == 0 ||
m->absent)
- panic("vm_map_copyout: wiring 0x%x", m);
+ panic("vm_map_copyout: wiring %p", m);
m->busy = TRUE;
vm_object_unlock(object);
@@ -4245,6 +4247,10 @@ vm_map_t vm_map_fork(vm_map_t old_map)
new_map = vm_map_create(new_pmap,
old_map->min_offset,
old_map->max_offset);
+ if (new_map == VM_MAP_NULL) {
+ pmap_destroy(new_pmap);
+ return VM_MAP_NULL;
+ }
for (
old_entry = vm_map_first_entry(old_map);
@@ -4862,6 +4868,37 @@ kern_return_t vm_map_machine_attribute(
return ret;
}
+/*
+ * Routine: vm_map_msync
+ * Purpose:
+ * Synchronize out pages of the given map to their memory
+ * manager, if any.
+ */
+kern_return_t vm_map_msync(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_size_t size,
+ vm_sync_t sync_flags)
+{
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if ((sync_flags & (VM_SYNC_ASYNCHRONOUS | VM_SYNC_SYNCHRONOUS)) ==
+ (VM_SYNC_ASYNCHRONOUS | VM_SYNC_SYNCHRONOUS))
+ return KERN_INVALID_ARGUMENT;
+
+ size = round_page(address + size) - trunc_page(address);
+ address = trunc_page(address);
+
+ if (size == 0)
+ return KERN_SUCCESS;
+
+ /* TODO */
+
+ return KERN_INVALID_ARGUMENT;
+}
+
+
#if MACH_KDB
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 87660f31..2561ec44 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -47,6 +47,7 @@
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_wire.h>
+#include <mach/vm_sync.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
@@ -443,6 +444,9 @@ extern kern_return_t vm_map_machine_attribute(vm_map_t, vm_offset_t,
vm_machine_attribute_t,
vm_machine_attribute_val_t *);
+extern kern_return_t vm_map_msync(vm_map_t,
+ vm_offset_t, vm_size_t, vm_sync_t);
+
/* Delete entry from map */
extern void vm_map_entry_delete(vm_map_t, vm_map_entry_t);
diff --git a/vm/vm_object.c b/vm/vm_object.c
index ea81039d..a68e5209 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -372,7 +372,7 @@ static void vm_object_cache_remove(
}
void vm_object_collect(
- register vm_object_t object)
+ vm_object_t object)
{
vm_object_unlock(object);
@@ -1920,8 +1920,6 @@ void vm_object_destroy(
vm_object_deallocate(object);
}
-boolean_t vm_object_accept_old_init_protocol = FALSE;
-
/*
* Routine: vm_object_enter
* Purpose:
@@ -2109,9 +2107,6 @@ restart:
vm_object_lock(object);
object->pager_initialized = TRUE;
- if (vm_object_accept_old_init_protocol)
- object->pager_ready = TRUE;
-
vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
} else {
vm_object_lock(object);
diff --git a/vm/vm_object.h b/vm/vm_object.h
index f8f9bf8d..80d449a0 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -396,4 +396,18 @@ MACRO_END
extern int vm_object_external_count;
extern int vm_object_external_pages;
+/* Add a reference to a locked VM object. */
+static inline int
+vm_object_reference_locked (vm_object_t obj)
+{
+ return (++obj->ref_count);
+}
+
+/* Remove a reference from a locked VM object. */
+static inline int
+vm_object_unreference_locked (vm_object_t obj)
+{
+ return (--obj->ref_count);
+}
+
#endif /* _VM_VM_OBJECT_H_ */
diff --git a/vm/vm_user.c b/vm/vm_user.c
index d29bbb23..2f41d322 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -39,8 +39,10 @@
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/vm_cache_statistics.h>
+#include <mach/vm_sync.h>
#include <kern/host.h>
#include <kern/task.h>
+#include <kern/mach.server.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
@@ -480,6 +482,56 @@ kern_return_t vm_wire_all(const ipc_port_t port, vm_map_t map, vm_wire_t flags)
return vm_map_pageable_all(map, flags);
}
+/*
+ * vm_object_sync synchronizes out pages from the memory object to its
+ * memory manager, if any.
+ */
+kern_return_t vm_object_sync(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ boolean_t should_flush,
+ boolean_t should_return,
+ boolean_t should_iosync)
+{
+ if (object == VM_OBJECT_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /* FIXME: we should introduce an internal function, e.g.
+ vm_object_update, rather than calling memory_object_lock_request. */
+ vm_object_reference(object);
+
+ /* This is already always synchronous for now. */
+ (void) should_iosync;
+
+ size = round_page(offset + size) - trunc_page(offset);
+ offset = trunc_page(offset);
+
+ return memory_object_lock_request(object, offset, size,
+ should_return ?
+ MEMORY_OBJECT_RETURN_ALL :
+ MEMORY_OBJECT_RETURN_NONE,
+ should_flush,
+ VM_PROT_NO_CHANGE,
+ NULL, 0);
+}
+
+/*
+ * vm_msync synchronizes out pages from the map to their memory manager,
+ * if any.
+ */
+kern_return_t vm_msync(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ vm_sync_t sync_flags)
+{
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ return vm_map_msync(map, (vm_offset_t) address, size, sync_flags);
+}
+
kern_return_t experimental_vm_allocate_contiguous(host_priv, map, result_vaddr, result_paddr, size)
host_t host_priv;
vm_map_t map;
diff --git a/xen/console.c b/xen/console.c
index aed63cb5..e5aeb186 100644
--- a/xen/console.c
+++ b/xen/console.c
@@ -169,12 +169,12 @@ void hypcnstop()
{
}
-io_return_t hypcngetstat(dev_t dev, int flavor, int *data, unsigned int *count)
+io_return_t hypcngetstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count)
{
return tty_get_status(&hypcn_tty, flavor, data, count);
}
-io_return_t hypcnsetstat(dev_t dev, int flavor, int *data, unsigned int count)
+io_return_t hypcnsetstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t count)
{
return tty_set_status(&hypcn_tty, flavor, data, count);
}
diff --git a/xen/console.h b/xen/console.h
index cd5fd5fc..527f5fbd 100644
--- a/xen/console.h
+++ b/xen/console.h
@@ -43,8 +43,8 @@ extern int hypcnopen(dev_t dev, int flag, io_req_t ior);
extern int hypcnread(dev_t dev, io_req_t ior);
extern int hypcnwrite(dev_t dev, io_req_t ior);
extern void hypcnclose(dev_t dev, int flag);
-extern io_return_t hypcngetstat(dev_t dev, int flavor, int *data, unsigned int *count);
-extern io_return_t hypcnsetstat(dev_t dev, int flavor, int *data, unsigned int count);
+extern io_return_t hypcngetstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count);
+extern io_return_t hypcnsetstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t count);
extern int hypcnportdeath(dev_t dev, mach_port_t port);
#endif /* XEN_CONSOLE_H */